blob: 0b74dc0ede9cf5f59c3cb23481293a056d9b7f88 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
Patrick McHardy41794772007-03-16 01:19:15 -070029#include <linux/hrtimer.h>
Jarek Poplawski25bfcd52008-08-18 20:53:34 -070030#include <linux/lockdep.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020033#include <net/net_namespace.h>
Denis V. Lunevb8542722007-12-01 00:21:31 +110034#include <net/sock.h>
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -070035#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <net/pkt_sched.h>
37
Tom Goff7316ae82010-03-19 15:40:13 +000038static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 struct Qdisc *old, struct Qdisc *new);
Tom Goff7316ae82010-03-19 15:40:13 +000041static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
45/*
46
47 Short review.
48 -------------
49
50 This file consists of two interrelated parts:
51
52 1. queueing disciplines manager frontend.
53 2. traffic classes manager frontend.
54
55 Generally, queueing discipline ("qdisc") is a black box,
56 which is able to enqueue packets and to dequeue them (when
57 device is ready to send something) in order and at times
58 determined by algorithm hidden in it.
59
   qdiscs are divided into two categories:
61 - "queues", which have no internal structure visible from outside.
62 - "schedulers", which split all the packets to "traffic classes",
63 using "packet classifiers" (look at cls_api.c)
64
65 In turn, classes may have child qdiscs (as rule, queues)
66 attached to them etc. etc. etc.
67
   The goal of the routines in this file is to translate
   the handle-based information supplied by the user into a form
   more intelligible to the kernel, to perform some sanity checks
   and the parts of the work that are common to all qdiscs,
   and to provide rtnetlink notifications.
73
74 All real intelligent work is done inside qdisc modules.
75
76
77
78 Every discipline has two major routines: enqueue and dequeue.
79
80 ---dequeue
81
82 dequeue usually returns a skb to send. It is allowed to return NULL,
83 but it does not mean that queue is empty, it just means that
84 discipline does not want to send anything this time.
85 Queue is really empty if q->q.qlen == 0.
86 For complicated disciplines with multiple queues q->q is not
87 real packet queue, but however q->q.qlen must be valid.
88
89 ---enqueue
90
91 enqueue returns 0, if packet was enqueued successfully.
92 If packet (this one or another one) was dropped, it returns
93 not zero error code.
94 NET_XMIT_DROP - this packet dropped
95 Expected action: do not backoff, but wait until queue will clear.
96 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
97 Expected action: backoff or ignore
98 NET_XMIT_POLICED - dropped by police.
99 Expected action: backoff or error to real-time apps.
100
101 Auxiliary routines:
102
Jarek Poplawski99c0db22008-10-31 00:45:27 -0700103 ---peek
104
105 like dequeue but without removing a packet from the queue
106
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 ---reset
108
109 returns qdisc to initial state: purge all buffers, clear all
110 timers, counters (except for statistics) etc.
111
112 ---init
113
114 initializes newly created qdisc.
115
116 ---destroy
117
118 destroys resources allocated by init and during lifetime of qdisc.
119
120 ---change
121
122 changes qdisc parameters.
123 */
124
125/* Protects list of registered TC modules. It is pure SMP lock. */
126static DEFINE_RWLOCK(qdisc_mod_lock);
127
128
129/************************************************
130 * Queueing disciplines manipulation. *
131 ************************************************/
132
133
134/* The list of all installed queueing disciplines. */
135
136static struct Qdisc_ops *qdisc_base;
137
Zhi Yong Wu21eb2182014-01-01 04:34:51 +0800138/* Register/unregister queueing discipline */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
140int register_qdisc(struct Qdisc_ops *qops)
141{
142 struct Qdisc_ops *q, **qp;
143 int rc = -EEXIST;
144
145 write_lock(&qdisc_mod_lock);
146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
147 if (!strcmp(qops->id, q->id))
148 goto out;
149
150 if (qops->enqueue == NULL)
151 qops->enqueue = noop_qdisc_ops.enqueue;
Jarek Poplawski99c0db22008-10-31 00:45:27 -0700152 if (qops->peek == NULL) {
Jarek Poplawski68fd26b2010-08-09 12:18:48 +0000153 if (qops->dequeue == NULL)
Jarek Poplawski99c0db22008-10-31 00:45:27 -0700154 qops->peek = noop_qdisc_ops.peek;
Jarek Poplawski68fd26b2010-08-09 12:18:48 +0000155 else
156 goto out_einval;
Jarek Poplawski99c0db22008-10-31 00:45:27 -0700157 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158 if (qops->dequeue == NULL)
159 qops->dequeue = noop_qdisc_ops.dequeue;
160
Jarek Poplawski68fd26b2010-08-09 12:18:48 +0000161 if (qops->cl_ops) {
162 const struct Qdisc_class_ops *cops = qops->cl_ops;
163
Jarek Poplawski3e9e5a52010-08-10 22:31:20 +0000164 if (!(cops->get && cops->put && cops->walk && cops->leaf))
Jarek Poplawski68fd26b2010-08-09 12:18:48 +0000165 goto out_einval;
166
167 if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
168 goto out_einval;
169 }
170
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171 qops->next = NULL;
172 *qp = qops;
173 rc = 0;
174out:
175 write_unlock(&qdisc_mod_lock);
176 return rc;
Jarek Poplawski68fd26b2010-08-09 12:18:48 +0000177
178out_einval:
179 rc = -EINVAL;
180 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800182EXPORT_SYMBOL(register_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
184int unregister_qdisc(struct Qdisc_ops *qops)
185{
186 struct Qdisc_ops *q, **qp;
187 int err = -ENOENT;
188
189 write_lock(&qdisc_mod_lock);
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 if (q == qops)
192 break;
193 if (q) {
194 *qp = q->next;
195 q->next = NULL;
196 err = 0;
197 }
198 write_unlock(&qdisc_mod_lock);
199 return err;
200}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800201EXPORT_SYMBOL(unregister_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
stephen hemminger6da7c8f2013-08-27 16:19:08 -0700203/* Get default qdisc if not otherwise specified */
204void qdisc_get_default(char *name, size_t len)
205{
206 read_lock(&qdisc_mod_lock);
207 strlcpy(name, default_qdisc_ops->id, len);
208 read_unlock(&qdisc_mod_lock);
209}
210
211static struct Qdisc_ops *qdisc_lookup_default(const char *name)
212{
213 struct Qdisc_ops *q = NULL;
214
215 for (q = qdisc_base; q; q = q->next) {
216 if (!strcmp(name, q->id)) {
217 if (!try_module_get(q->owner))
218 q = NULL;
219 break;
220 }
221 }
222
223 return q;
224}
225
226/* Set new default qdisc to use */
227int qdisc_set_default(const char *name)
228{
229 const struct Qdisc_ops *ops;
230
231 if (!capable(CAP_NET_ADMIN))
232 return -EPERM;
233
234 write_lock(&qdisc_mod_lock);
235 ops = qdisc_lookup_default(name);
236 if (!ops) {
237 /* Not found, drop lock and try to load module */
238 write_unlock(&qdisc_mod_lock);
239 request_module("sch_%s", name);
240 write_lock(&qdisc_mod_lock);
241
242 ops = qdisc_lookup_default(name);
243 }
244
245 if (ops) {
246 /* Set new default */
247 module_put(default_qdisc_ops->owner);
248 default_qdisc_ops = ops;
249 }
250 write_unlock(&qdisc_mod_lock);
251
252 return ops ? 0 : -ENOENT;
253}
254
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255/* We know handle. Find qdisc among all qdisc's attached to device
256 (root qdisc, all its children, children of children etc.)
257 */
258
Hannes Eder6113b742008-11-28 03:06:46 -0800259static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
David S. Miller8123b422008-08-08 23:23:39 -0700260{
261 struct Qdisc *q;
262
263 if (!(root->flags & TCQ_F_BUILTIN) &&
264 root->handle == handle)
265 return root;
266
267 list_for_each_entry(q, &root->list, list) {
268 if (q->handle == handle)
269 return q;
270 }
271 return NULL;
272}
273
Eric Dumazet95dc1922013-12-05 11:12:02 -0800274void qdisc_list_add(struct Qdisc *q)
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700275{
Eric Dumazet37314362014-03-08 08:01:19 -0800276 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
277 struct Qdisc *root = qdisc_dev(q)->qdisc;
Eric Dumazete57a7842013-12-12 15:41:56 -0800278
Eric Dumazet37314362014-03-08 08:01:19 -0800279 WARN_ON_ONCE(root == &noop_qdisc);
Eric Dumazete57a7842013-12-12 15:41:56 -0800280 list_add_tail(&q->list, &root->list);
Eric Dumazet37314362014-03-08 08:01:19 -0800281 }
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700282}
Eric Dumazet95dc1922013-12-05 11:12:02 -0800283EXPORT_SYMBOL(qdisc_list_add);
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700284
285void qdisc_list_del(struct Qdisc *q)
286{
Jarek Poplawskif6486d42008-11-25 13:56:06 -0800287 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700288 list_del(&q->list);
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700289}
290EXPORT_SYMBOL(qdisc_list_del);
291
David S. Milleread81cc2008-07-17 00:50:32 -0700292struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
Patrick McHardy43effa12006-11-29 17:35:48 -0800293{
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700294 struct Qdisc *q;
295
Patrick McHardyaf356af2009-09-04 06:41:18 +0000296 q = qdisc_match_from_root(dev->qdisc, handle);
297 if (q)
298 goto out;
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700299
Eric Dumazet24824a02010-10-02 06:11:55 +0000300 if (dev_ingress_queue(dev))
301 q = qdisc_match_from_root(
302 dev_ingress_queue(dev)->qdisc_sleeping,
303 handle);
Jarek Poplawskif6486d42008-11-25 13:56:06 -0800304out:
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700305 return q;
Patrick McHardy43effa12006-11-29 17:35:48 -0800306}
307
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
309{
310 unsigned long cl;
311 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -0800312 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313
314 if (cops == NULL)
315 return NULL;
316 cl = cops->get(p, classid);
317
318 if (cl == 0)
319 return NULL;
320 leaf = cops->leaf(p, cl);
321 cops->put(p, cl);
322 return leaf;
323}
324
325/* Find queueing discipline by name */
326
Patrick McHardy1e904742008-01-22 22:11:17 -0800327static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328{
329 struct Qdisc_ops *q = NULL;
330
331 if (kind) {
332 read_lock(&qdisc_mod_lock);
333 for (q = qdisc_base; q; q = q->next) {
Patrick McHardy1e904742008-01-22 22:11:17 -0800334 if (nla_strcmp(kind, q->id) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 if (!try_module_get(q->owner))
336 q = NULL;
337 break;
338 }
339 }
340 read_unlock(&qdisc_mod_lock);
341 }
342 return q;
343}
344
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +0200345/* The linklayer setting were not transferred from iproute2, in older
346 * versions, and the rate tables lookup systems have been dropped in
347 * the kernel. To keep backward compatible with older iproute2 tc
348 * utils, we detect the linklayer setting by detecting if the rate
349 * table were modified.
350 *
351 * For linklayer ATM table entries, the rate table will be aligned to
352 * 48 bytes, thus some table entries will contain the same value. The
353 * mpu (min packet unit) is also encoded into the old rate table, thus
354 * starting from the mpu, we find low and high table entries for
355 * mapping this cell. If these entries contain the same value, when
356 * the rate tables have been modified for linklayer ATM.
357 *
358 * This is done by rounding mpu to the nearest 48 bytes cell/entry,
359 * and then roundup to the next cell, calc the table entry one below,
360 * and compare.
361 */
362static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
363{
364 int low = roundup(r->mpu, 48);
365 int high = roundup(low+1, 48);
366 int cell_low = low >> r->cell_log;
367 int cell_high = (high >> r->cell_log) - 1;
368
369 /* rtab is too inaccurate at rates > 100Mbit/s */
370 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
371 pr_debug("TC linklayer: Giving up ATM detection\n");
372 return TC_LINKLAYER_ETHERNET;
373 }
374
375 if ((cell_high > cell_low) && (cell_high < 256)
376 && (rtab[cell_low] == rtab[cell_high])) {
377 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
378 cell_low, cell_high, rtab[cell_high]);
379 return TC_LINKLAYER_ATM;
380 }
381 return TC_LINKLAYER_ETHERNET;
382}
383
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384static struct qdisc_rate_table *qdisc_rtab_list;
385
Patrick McHardy1e904742008-01-22 22:11:17 -0800386struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387{
388 struct qdisc_rate_table *rtab;
389
Eric Dumazet40edeff2013-06-02 11:15:55 +0000390 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
391 nla_len(tab) != TC_RTAB_SIZE)
392 return NULL;
393
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
Eric Dumazet40edeff2013-06-02 11:15:55 +0000395 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
396 !memcmp(&rtab->data, nla_data(tab), 1024)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 rtab->refcnt++;
398 return rtab;
399 }
400 }
401
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
403 if (rtab) {
404 rtab->rate = *r;
405 rtab->refcnt = 1;
Patrick McHardy1e904742008-01-22 22:11:17 -0800406 memcpy(rtab->data, nla_data(tab), 1024);
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +0200407 if (r->linklayer == TC_LINKLAYER_UNAWARE)
408 r->linklayer = __detect_linklayer(r, rtab->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409 rtab->next = qdisc_rtab_list;
410 qdisc_rtab_list = rtab;
411 }
412 return rtab;
413}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800414EXPORT_SYMBOL(qdisc_get_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415
416void qdisc_put_rtab(struct qdisc_rate_table *tab)
417{
418 struct qdisc_rate_table *rtab, **rtabp;
419
420 if (!tab || --tab->refcnt)
421 return;
422
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000423 for (rtabp = &qdisc_rtab_list;
424 (rtab = *rtabp) != NULL;
425 rtabp = &rtab->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (rtab == tab) {
427 *rtabp = rtab->next;
428 kfree(rtab);
429 return;
430 }
431 }
432}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800433EXPORT_SYMBOL(qdisc_put_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700435static LIST_HEAD(qdisc_stab_list);
436static DEFINE_SPINLOCK(qdisc_stab_lock);
437
438static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
439 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
440 [TCA_STAB_DATA] = { .type = NLA_BINARY },
441};
442
443static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
444{
445 struct nlattr *tb[TCA_STAB_MAX + 1];
446 struct qdisc_size_table *stab;
447 struct tc_sizespec *s;
448 unsigned int tsize = 0;
449 u16 *tab = NULL;
450 int err;
451
452 err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
453 if (err < 0)
454 return ERR_PTR(err);
455 if (!tb[TCA_STAB_BASE])
456 return ERR_PTR(-EINVAL);
457
458 s = nla_data(tb[TCA_STAB_BASE]);
459
460 if (s->tsize > 0) {
461 if (!tb[TCA_STAB_DATA])
462 return ERR_PTR(-EINVAL);
463 tab = nla_data(tb[TCA_STAB_DATA]);
464 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
465 }
466
Dan Carpenter00093fa2010-08-14 11:09:49 +0000467 if (tsize != s->tsize || (!tab && tsize > 0))
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700468 return ERR_PTR(-EINVAL);
469
David S. Millerf3b96052008-08-18 22:33:05 -0700470 spin_lock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700471
472 list_for_each_entry(stab, &qdisc_stab_list, list) {
473 if (memcmp(&stab->szopts, s, sizeof(*s)))
474 continue;
475 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
476 continue;
477 stab->refcnt++;
David S. Millerf3b96052008-08-18 22:33:05 -0700478 spin_unlock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700479 return stab;
480 }
481
David S. Millerf3b96052008-08-18 22:33:05 -0700482 spin_unlock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700483
484 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
485 if (!stab)
486 return ERR_PTR(-ENOMEM);
487
488 stab->refcnt = 1;
489 stab->szopts = *s;
490 if (tsize > 0)
491 memcpy(stab->data, tab, tsize * sizeof(u16));
492
David S. Millerf3b96052008-08-18 22:33:05 -0700493 spin_lock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700494 list_add_tail(&stab->list, &qdisc_stab_list);
David S. Millerf3b96052008-08-18 22:33:05 -0700495 spin_unlock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700496
497 return stab;
498}
499
Eric Dumazeta2da5702011-01-20 03:48:19 +0000500static void stab_kfree_rcu(struct rcu_head *head)
501{
502 kfree(container_of(head, struct qdisc_size_table, rcu));
503}
504
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700505void qdisc_put_stab(struct qdisc_size_table *tab)
506{
507 if (!tab)
508 return;
509
David S. Millerf3b96052008-08-18 22:33:05 -0700510 spin_lock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700511
512 if (--tab->refcnt == 0) {
513 list_del(&tab->list);
Eric Dumazeta2da5702011-01-20 03:48:19 +0000514 call_rcu_bh(&tab->rcu, stab_kfree_rcu);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700515 }
516
David S. Millerf3b96052008-08-18 22:33:05 -0700517 spin_unlock(&qdisc_stab_lock);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700518}
519EXPORT_SYMBOL(qdisc_put_stab);
520
521static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
522{
523 struct nlattr *nest;
524
525 nest = nla_nest_start(skb, TCA_STAB);
Patrick McHardy3aa46142008-11-20 04:07:14 -0800526 if (nest == NULL)
527 goto nla_put_failure;
David S. Miller1b34ec42012-03-29 05:11:39 -0400528 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
529 goto nla_put_failure;
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700530 nla_nest_end(skb, nest);
531
532 return skb->len;
533
534nla_put_failure:
535 return -1;
536}
537
Eric Dumazeta2da5702011-01-20 03:48:19 +0000538void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700539{
540 int pkt_len, slot;
541
542 pkt_len = skb->len + stab->szopts.overhead;
543 if (unlikely(!stab->szopts.tsize))
544 goto out;
545
546 slot = pkt_len + stab->szopts.cell_align;
547 if (unlikely(slot < 0))
548 slot = 0;
549
550 slot >>= stab->szopts.cell_log;
551 if (likely(slot < stab->szopts.tsize))
552 pkt_len = stab->data[slot];
553 else
554 pkt_len = stab->data[stab->szopts.tsize - 1] *
555 (slot / stab->szopts.tsize) +
556 stab->data[slot % stab->szopts.tsize];
557
558 pkt_len <<= stab->szopts.size_log;
559out:
560 if (unlikely(pkt_len < 1))
561 pkt_len = 1;
562 qdisc_skb_cb(skb)->pkt_len = pkt_len;
563}
Eric Dumazeta2da5702011-01-20 03:48:19 +0000564EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700565
Florian Westphal6e765a02014-06-11 20:35:18 +0200566void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
Jarek Poplawskib00355d2009-02-01 01:12:42 -0800567{
568 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000569 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
570 txt, qdisc->ops->id, qdisc->handle >> 16);
Jarek Poplawskib00355d2009-02-01 01:12:42 -0800571 qdisc->flags |= TCQ_F_WARN_NONWC;
572 }
573}
574EXPORT_SYMBOL(qdisc_warn_nonwc);
575
Patrick McHardy41794772007-03-16 01:19:15 -0700576static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
577{
578 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
David S. Miller2fbd3da2009-09-01 17:59:25 -0700579 timer);
Patrick McHardy41794772007-03-16 01:19:15 -0700580
John Fastabend1e203c12014-10-02 22:43:09 -0700581 rcu_read_lock();
Eric Dumazetfd245a42011-01-20 05:27:16 +0000582 qdisc_unthrottled(wd->qdisc);
David S. Miller8608db02008-08-18 20:51:18 -0700583 __netif_schedule(qdisc_root(wd->qdisc));
John Fastabend1e203c12014-10-02 22:43:09 -0700584 rcu_read_unlock();
Stephen Hemminger19365022007-03-22 12:18:35 -0700585
Patrick McHardy41794772007-03-16 01:19:15 -0700586 return HRTIMER_NORESTART;
587}
588
589void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
590{
Eric Dumazet4a8e3202014-09-20 18:01:30 -0700591 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
David S. Miller2fbd3da2009-09-01 17:59:25 -0700592 wd->timer.function = qdisc_watchdog;
Patrick McHardy41794772007-03-16 01:19:15 -0700593 wd->qdisc = qdisc;
594}
595EXPORT_SYMBOL(qdisc_watchdog_init);
596
Eric Dumazetf2600cf2014-10-04 10:11:31 -0700597void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
Patrick McHardy41794772007-03-16 01:19:15 -0700598{
Jarek Poplawski2540e052008-08-21 05:11:14 -0700599 if (test_bit(__QDISC_STATE_DEACTIVATED,
600 &qdisc_root_sleeping(wd->qdisc)->state))
601 return;
602
Eric Dumazetf2600cf2014-10-04 10:11:31 -0700603 if (throttle)
604 qdisc_throttled(wd->qdisc);
Eric Dumazet46baac32012-10-20 00:40:51 +0000605
606 hrtimer_start(&wd->timer,
Jiri Pirko34c5d292013-02-12 00:12:04 +0000607 ns_to_ktime(expires),
Eric Dumazet4a8e3202014-09-20 18:01:30 -0700608 HRTIMER_MODE_ABS_PINNED);
Patrick McHardy41794772007-03-16 01:19:15 -0700609}
Jiri Pirko34c5d292013-02-12 00:12:04 +0000610EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
Patrick McHardy41794772007-03-16 01:19:15 -0700611
612void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
613{
David S. Miller2fbd3da2009-09-01 17:59:25 -0700614 hrtimer_cancel(&wd->timer);
Eric Dumazetfd245a42011-01-20 05:27:16 +0000615 qdisc_unthrottled(wd->qdisc);
Patrick McHardy41794772007-03-16 01:19:15 -0700616}
617EXPORT_SYMBOL(qdisc_watchdog_cancel);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700618
Adrian Bunka94f7792008-07-22 14:20:11 -0700619static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
Patrick McHardy6fe1c7a2008-07-05 23:21:31 -0700620{
621 unsigned int size = n * sizeof(struct hlist_head), i;
622 struct hlist_head *h;
623
624 if (size <= PAGE_SIZE)
625 h = kmalloc(size, GFP_KERNEL);
626 else
627 h = (struct hlist_head *)
628 __get_free_pages(GFP_KERNEL, get_order(size));
629
630 if (h != NULL) {
631 for (i = 0; i < n; i++)
632 INIT_HLIST_HEAD(&h[i]);
633 }
634 return h;
635}
636
637static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
638{
639 unsigned int size = n * sizeof(struct hlist_head);
640
641 if (size <= PAGE_SIZE)
642 kfree(h);
643 else
644 free_pages((unsigned long)h, get_order(size));
645}
646
647void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
648{
649 struct Qdisc_class_common *cl;
Sasha Levinb67bfe02013-02-27 17:06:00 -0800650 struct hlist_node *next;
Patrick McHardy6fe1c7a2008-07-05 23:21:31 -0700651 struct hlist_head *nhash, *ohash;
652 unsigned int nsize, nmask, osize;
653 unsigned int i, h;
654
655 /* Rehash when load factor exceeds 0.75 */
656 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
657 return;
658 nsize = clhash->hashsize * 2;
659 nmask = nsize - 1;
660 nhash = qdisc_class_hash_alloc(nsize);
661 if (nhash == NULL)
662 return;
663
664 ohash = clhash->hash;
665 osize = clhash->hashsize;
666
667 sch_tree_lock(sch);
668 for (i = 0; i < osize; i++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -0800669 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
Patrick McHardy6fe1c7a2008-07-05 23:21:31 -0700670 h = qdisc_class_hash(cl->classid, nmask);
671 hlist_add_head(&cl->hnode, &nhash[h]);
672 }
673 }
674 clhash->hash = nhash;
675 clhash->hashsize = nsize;
676 clhash->hashmask = nmask;
677 sch_tree_unlock(sch);
678
679 qdisc_class_hash_free(ohash, osize);
680}
681EXPORT_SYMBOL(qdisc_class_hash_grow);
682
683int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
684{
685 unsigned int size = 4;
686
687 clhash->hash = qdisc_class_hash_alloc(size);
688 if (clhash->hash == NULL)
689 return -ENOMEM;
690 clhash->hashsize = size;
691 clhash->hashmask = size - 1;
692 clhash->hashelems = 0;
693 return 0;
694}
695EXPORT_SYMBOL(qdisc_class_hash_init);
696
697void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
698{
699 qdisc_class_hash_free(clhash->hash, clhash->hashsize);
700}
701EXPORT_SYMBOL(qdisc_class_hash_destroy);
702
703void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
704 struct Qdisc_class_common *cl)
705{
706 unsigned int h;
707
708 INIT_HLIST_NODE(&cl->hnode);
709 h = qdisc_class_hash(cl->classid, clhash->hashmask);
710 hlist_add_head(&cl->hnode, &clhash->hash[h]);
711 clhash->hashelems++;
712}
713EXPORT_SYMBOL(qdisc_class_hash_insert);
714
715void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
716 struct Qdisc_class_common *cl)
717{
718 hlist_del(&cl->hnode);
719 clhash->hashelems--;
720}
721EXPORT_SYMBOL(qdisc_class_hash_remove);
722
Eric Dumazetfa0f5aa2012-01-03 00:00:11 +0000723/* Allocate an unique handle from space managed by kernel
724 * Possible range is [8000-FFFF]:0000 (0x8000 values)
725 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726static u32 qdisc_alloc_handle(struct net_device *dev)
727{
Eric Dumazetfa0f5aa2012-01-03 00:00:11 +0000728 int i = 0x8000;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
730
731 do {
732 autohandle += TC_H_MAKE(0x10000U, 0);
733 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
734 autohandle = TC_H_MAKE(0x80000000U, 0);
Eric Dumazetfa0f5aa2012-01-03 00:00:11 +0000735 if (!qdisc_lookup(dev, autohandle))
736 return autohandle;
737 cond_resched();
738 } while (--i > 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739
Eric Dumazetfa0f5aa2012-01-03 00:00:11 +0000740 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741}
742
Patrick McHardy43effa12006-11-29 17:35:48 -0800743void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
744{
Eric Dumazet20fea082007-11-14 01:44:41 -0800745 const struct Qdisc_class_ops *cops;
Patrick McHardy43effa12006-11-29 17:35:48 -0800746 unsigned long cl;
747 u32 parentid;
Eric Dumazet2c8c8e62013-10-07 08:32:32 -0700748 int drops;
Patrick McHardy43effa12006-11-29 17:35:48 -0800749
750 if (n == 0)
751 return;
Eric Dumazet2c8c8e62013-10-07 08:32:32 -0700752 drops = max_t(int, n, 0);
Patrick McHardy43effa12006-11-29 17:35:48 -0800753 while ((parentid = sch->parent)) {
Jarek Poplawski066a3b52008-04-14 15:10:42 -0700754 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
755 return;
756
David S. Miller5ce2d482008-07-08 17:06:30 -0700757 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
Patrick McHardyffc8fef2007-07-30 17:11:50 -0700758 if (sch == NULL) {
759 WARN_ON(parentid != TC_H_ROOT);
760 return;
761 }
Patrick McHardy43effa12006-11-29 17:35:48 -0800762 cops = sch->ops->cl_ops;
763 if (cops->qlen_notify) {
764 cl = cops->get(sch, parentid);
765 cops->qlen_notify(sch, cl);
766 cops->put(sch, cl);
767 }
768 sch->q.qlen -= n;
John Fastabend25331d62014-09-28 11:53:29 -0700769 __qdisc_qstats_drop(sch, drops);
Patrick McHardy43effa12006-11-29 17:35:48 -0800770 }
771}
772EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773
Tom Goff7316ae82010-03-19 15:40:13 +0000774static void notify_and_destroy(struct net *net, struct sk_buff *skb,
775 struct nlmsghdr *n, u32 clid,
David S. Miller99194cf2008-07-17 04:54:10 -0700776 struct Qdisc *old, struct Qdisc *new)
777{
778 if (new || old)
Tom Goff7316ae82010-03-19 15:40:13 +0000779 qdisc_notify(net, skb, n, clid, old, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780
David S. Miller4d8863a2008-08-18 21:03:15 -0700781 if (old)
David S. Miller99194cf2008-07-17 04:54:10 -0700782 qdisc_destroy(old);
David S. Miller99194cf2008-07-17 04:54:10 -0700783}
784
785/* Graft qdisc "new" to class "classid" of qdisc "parent" or
786 * to device "dev".
787 *
788 * When appropriate send a netlink notification using 'skb'
789 * and "n".
790 *
791 * On success, destroy old qdisc.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792 */
793
794static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
David S. Miller99194cf2008-07-17 04:54:10 -0700795 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
796 struct Qdisc *new, struct Qdisc *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797{
David S. Miller99194cf2008-07-17 04:54:10 -0700798 struct Qdisc *q = old;
Tom Goff7316ae82010-03-19 15:40:13 +0000799 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700800 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900802 if (parent == NULL) {
David S. Miller99194cf2008-07-17 04:54:10 -0700803 unsigned int i, num_q, ingress;
804
805 ingress = 0;
806 num_q = dev->num_tx_queues;
David S. Miller8d50b532008-07-30 02:37:46 -0700807 if ((q && q->flags & TCQ_F_INGRESS) ||
808 (new && new->flags & TCQ_F_INGRESS)) {
David S. Miller99194cf2008-07-17 04:54:10 -0700809 num_q = 1;
810 ingress = 1;
Eric Dumazet24824a02010-10-02 06:11:55 +0000811 if (!dev_ingress_queue(dev))
812 return -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 }
David S. Miller99194cf2008-07-17 04:54:10 -0700814
815 if (dev->flags & IFF_UP)
816 dev_deactivate(dev);
817
David S. Miller6ec1c692009-09-06 01:58:51 -0700818 if (new && new->ops->attach) {
819 new->ops->attach(new);
820 num_q = 0;
821 }
822
David S. Miller99194cf2008-07-17 04:54:10 -0700823 for (i = 0; i < num_q; i++) {
Eric Dumazet24824a02010-10-02 06:11:55 +0000824 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
David S. Miller99194cf2008-07-17 04:54:10 -0700825
826 if (!ingress)
827 dev_queue = netdev_get_tx_queue(dev, i);
828
David S. Miller8d50b532008-07-30 02:37:46 -0700829 old = dev_graft_qdisc(dev_queue, new);
830 if (new && i > 0)
831 atomic_inc(&new->refcnt);
832
Jarek Poplawski036d6a62009-09-13 22:35:44 +0000833 if (!ingress)
834 qdisc_destroy(old);
David S. Miller99194cf2008-07-17 04:54:10 -0700835 }
836
Jarek Poplawski036d6a62009-09-13 22:35:44 +0000837 if (!ingress) {
Tom Goff7316ae82010-03-19 15:40:13 +0000838 notify_and_destroy(net, skb, n, classid,
839 dev->qdisc, new);
Jarek Poplawski036d6a62009-09-13 22:35:44 +0000840 if (new && !new->ops->attach)
841 atomic_inc(&new->refcnt);
842 dev->qdisc = new ? : &noop_qdisc;
843 } else {
Tom Goff7316ae82010-03-19 15:40:13 +0000844 notify_and_destroy(net, skb, n, classid, old, new);
Jarek Poplawski036d6a62009-09-13 22:35:44 +0000845 }
Patrick McHardyaf356af2009-09-04 06:41:18 +0000846
David S. Miller99194cf2008-07-17 04:54:10 -0700847 if (dev->flags & IFF_UP)
848 dev_activate(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 } else {
Eric Dumazet20fea082007-11-14 01:44:41 -0800850 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851
Patrick McHardyc9f1d032009-09-04 06:41:13 +0000852 err = -EOPNOTSUPP;
853 if (cops && cops->graft) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854 unsigned long cl = cops->get(parent, classid);
855 if (cl) {
David S. Miller99194cf2008-07-17 04:54:10 -0700856 err = cops->graft(parent, cl, new, &old);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 cops->put(parent, cl);
Patrick McHardyc9f1d032009-09-04 06:41:13 +0000858 } else
859 err = -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 }
David S. Miller99194cf2008-07-17 04:54:10 -0700861 if (!err)
Tom Goff7316ae82010-03-19 15:40:13 +0000862 notify_and_destroy(net, skb, n, classid, old, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 }
864 return err;
865}
866
/* lockdep annotation is needed for ingress; egress gets it only for name.
 * Separate lock classes keep lockdep from conflating the ingress (rx) and
 * egress (tx) qdisc locks, which have different locking contexts.
 */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
870
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.

   On success returns the new qdisc (already linked into the per-dev qdisc
   list); on failure returns NULL and stores a negative errno in *errp.
   A special case is -EAGAIN, which tells the caller to replay the whole
   request after a module load (RTNL was dropped meanwhile).
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	/* Unknown qdisc kind: try to load the matching sch_<name> module. */
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	/* qdisc_alloc ties the qdisc to dev_queue and takes the refs the
	 * error paths below release (module ref, padded allocation).
	 */
	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		/* handle == 0 means "pick one for me" from the kernel pool. */
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		/* Per-cpu stats are opt-in per qdisc type; both allocations
		 * must succeed before the qdisc goes live.
		 */
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		/* Optional size table (TCA_STAB), published via RCU. */
		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			/* Pick the lock the estimator timer must take:
			 * the sleeping root lock for ordinary children,
			 * the qdisc's own lock for root/ingress/mq cases.
			 */
			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						root_lock,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	/* Drop the device ref and free the (padded) qdisc allocation. */
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
1015
/* Apply a change request (RTM_NEWQDISC on an existing qdisc): forward
 * TCA_OPTIONS to the qdisc's ->change op, swap in a new size table under
 * RCU, and replace the rate estimator.  Returns 0 or a negative errno;
 * only the ->change and stab steps can fail.
 */
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	/* Publish the new stab (or NULL) before releasing the old one. */
	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
1053
/* Walker state for loop detection when grafting a qdisc under a class.
 * NOTE: 'w' must remain the first member — check_loop_fn() casts the
 * struct qdisc_walker pointer back to struct check_loop_arg.
 */
struct check_loop_arg {
	struct qdisc_walker	w;	/* embedded walker; fn/stop/skip/count */
	struct Qdisc *p;		/* the qdisc we must not find below q */
	int depth;			/* current recursion depth */
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1061
1062static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1063{
1064 struct check_loop_arg arg;
1065
1066 if (q->ops->cl_ops == NULL)
1067 return 0;
1068
1069 arg.w.stop = arg.w.skip = arg.w.count = 0;
1070 arg.w.fn = check_loop_fn;
1071 arg.depth = depth;
1072 arg.p = p;
1073 q->ops->cl_ops->walk(q, &arg.w);
1074 return arg.w.stop ? -ELOOP : 0;
1075}
1076
1077static int
1078check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1079{
1080 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -08001081 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1083
1084 leaf = cops->leaf(q, cl);
1085 if (leaf) {
1086 if (leaf == arg->p || arg->depth > 7)
1087 return -ELOOP;
1088 return check_loop(leaf, arg->p, arg->depth + 1);
1089 }
1090 return 0;
1091}
1092
/*
 * Delete/get qdisc.
 *
 * Handles RTM_DELQDISC and RTM_GETQDISC netlink requests: locate the
 * qdisc either by its parent (tcm_parent) or by its handle (tcm_handle),
 * then either graft NULL in its place (delete) or send a notification
 * describing it (get).
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	/* Deleting requires CAP_NET_ADMIN in the owning user namespace;
	 * plain GET does not.
	 */
	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		/* Resolve by parent: root, ingress, or a class of p. */
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		/* If a handle was also given, it must match what we found. */
		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		/* Can only delete by parent, and never the builtin (0). */
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
1161
/*
 * Create/change qdisc.
 *
 * Handles RTM_NEWQDISC: depending on the netlink flags and whether a
 * matching qdisc already exists, this either modifies it in place
 * (qdisc_change), or creates a new one and grafts it into the tree
 * (qdisc_create + qdisc_graft).  A qdisc_create that had to load a
 * module returns -EAGAIN, which replays the whole request from scratch.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		/* Resolve the current child q at position clid (root,
		 * ingress — created on demand here — or a class of p).
		 */
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				/* Explicit handle given: re-graft an existing
				 * qdisc found by handle, or fall through to
				 * creation if it does not exist yet.
				 */
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				/* Grafting q under p must not create a cycle. */
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know, that some child q is already
				 * attached to this parent and have choice:
				 * either to change it or to create/graft new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, requestor wanted to say,
				 * that qdisc tcm_handle is not expected
				 * to exist, so that we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is sort of hole in API, we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft, if
				 * user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		/* Let a classful parent pick the tx queue, else use the
		 * parent's own queue, else default to queue 0.
		 */
		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
1319
/* Serialize one qdisc into a netlink message (tcmsg header, TCA_KIND,
 * qdisc-specific dump, size table, and statistics).  Returns skb->len
 * on success, or -1 after trimming any partially-written message.
 */
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	/* Dumps can iterate many qdiscs under RTNL; be scheduler-friendly. */
	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	/* Per-cpu counters are only valid for percpu-stats qdiscs;
	 * otherwise the aggregate fields in q are used directly.
	 */
	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	/* Undo everything written since 'b' so the skb stays consistent. */
	nlmsg_trim(skb, b);
	return -1;
}
1382
Eric Dumazet53b0f082010-05-22 20:37:44 +00001383static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1384{
1385 return (q->flags & TCQ_F_BUILTIN) ? true : false;
1386}
1387
/* Send an RTNLGRP_TC notification describing a qdisc replacement:
 * a RTM_DELQDISC record for @old and/or a RTM_NEWQDISC record for @new.
 * Built-in qdiscs are skipped; if nothing was written the message is
 * dropped and -EINVAL returned.
 */
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	/* Echo back to the requesting socket when there is one. */
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
1418
/* Dump @root and every qdisc on its ->list into @skb, resuming at index
 * @s_q_idx (netlink dumps are restartable).  *q_idx_p is advanced past
 * each qdisc considered; returns 0 when done, -1 when the skb filled up
 * and the dump must be continued in a later call.
 */
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	/* The root itself first, then its list of siblings/children. */
	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		/* Skip entries already emitted by a previous dump call. */
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
1458
/* RTM_GETQDISC dump handler: iterate all devices in the namespace and
 * dump their egress root and ingress qdisc hierarchies.  The resume
 * cursor lives in cb->args[0] (device index) and cb->args[1] (qdisc
 * index within the device).
 */
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		/* Past the resume device: restart qdisc index at 0. */
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	/* Save the cursor so the next dump call resumes here. */
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
1499
1500
1501
1502/************************************************
1503 * Traffic classes manipulation. *
1504 ************************************************/
1505
1506
1507
/* RTM_NEWTCLASS/RTM_DELTCLASS/RTM_GETTCLASS handler: resolve the target
 * qdisc and class from the tcmsg parent/handle fields, then create,
 * change, delete or report the class via the qdisc's class ops.
 * Called with RTNL held (rtnetlink handler); returns 0 or -errno.
 */
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;		/* class handle from cops->get(), 0 = none */
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	/* Only reads are allowed without CAP_NET_ADMIN in the owning ns. */
	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;	/* reused as the parent class id below */
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);	/* takes a class reference */

	if (cl == 0) {
		/* Class does not exist: only NEW with NLM_F_CREATE may proceed. */
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* Create or update: cops->change() may allocate a new class (new_cl). */
	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	/* Drop the reference taken by cops->get() above. */
	if (cl)
		cops->put(q, cl);

	return err;
}
1639
1640
/* Fill @skb with one netlink message of type @event describing class @cl
 * of qdisc @q, including kind, qdisc-specific attributes and statistics.
 * Returns skb->len on success, -1 if the skb ran out of room (any
 * partially written message is trimmed away).
 */
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point for nlmsg_trim() */
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();		/* dumps can be long; give the CPU up if needed */
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	/* Let the qdisc fill in class-specific attributes (and tcm_handle). */
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);	/* discard the incomplete message */
	return -1;
}
1686
Tom Goff7316ae82010-03-19 15:40:13 +00001687static int tclass_notify(struct net *net, struct sk_buff *oskb,
1688 struct nlmsghdr *n, struct Qdisc *q,
1689 unsigned long cl, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690{
1691 struct sk_buff *skb;
Eric W. Biederman15e47302012-09-07 20:12:54 +00001692 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
1694 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1695 if (!skb)
1696 return -ENOBUFS;
1697
Eric W. Biederman15e47302012-09-07 20:12:54 +00001698 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 kfree_skb(skb);
1700 return -EINVAL;
1701 }
1702
Eric W. Biederman15e47302012-09-07 20:12:54 +00001703 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001704 n->nlmsg_flags & NLM_F_ECHO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705}
1706
/* Glue between the generic class walker and the class dumper: carries
 * the dump skb and the netlink callback alongside the walker state.
 */
struct qdisc_dump_args {
	struct qdisc_walker w;	/* must stay first: qdisc_class_dump() casts back */
	struct sk_buff *skb;	/* skb being filled with RTM_NEWTCLASS messages */
	struct netlink_callback *cb;
};
1712
1713static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1714{
1715 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1716
Eric W. Biederman15e47302012-09-07 20:12:54 +00001717 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1719}
1720
/* Dump the classes of a single qdisc @q into @skb.  *t_p counts qdiscs
 * visited so far and s_t is the count to resume from (qdiscs below it
 * were fully dumped by a previous callback); cb->args[1] holds the
 * class skip count within the current qdisc.  Returns 0 to continue
 * with the next qdisc, -1 when @skb is full.
 */
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	/* Skip invisible qdiscs, already-dumped ones, classless qdiscs,
	 * and qdiscs not matching an explicit tcm_parent filter.
	 */
	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	/* Entering a new qdisc (not the resume target): clear stale
	 * per-qdisc resume state in cb->args[1..].
	 */
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];	/* classes already dumped last time */
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;	/* resume point for the next callback */
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
1749
1750static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1751 struct tcmsg *tcm, struct netlink_callback *cb,
1752 int *t_p, int s_t)
1753{
1754 struct Qdisc *q;
1755
1756 if (!root)
1757 return 0;
1758
1759 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1760 return -1;
1761
1762 list_for_each_entry(q, &root->list, list) {
1763 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1764 return -1;
1765 }
1766
1767 return 0;
1768}
1769
/* RTM_GETTCLASS dump callback: walk the target device's root qdisc
 * hierarchy and (if present) its ingress qdisc, emitting one message
 * per class.  Resumable: cb->args[0] carries the qdisc index reached.
 */
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);	/* takes a dev ref */
	if (!dev)
		return 0;

	s_t = cb->args[0];	/* qdisc index to resume from */
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;	/* record progress for the next callback */

	dev_put(dev);		/* balances dev_get_by_index() */
	return skb->len;
}
1802
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 *
 * Walks the tp chain under RCU-BH (rcu_dereference_bh); the first
 * classifier returning a non-negative verdict wins.  Returns that
 * verdict, or -1 when no classifier matched.
 */
int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = tc_skb_protocol(skb);
	int err;

	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		/* Only consult classifiers bound to this protocol (or ALL). */
		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;
		err = tp->classify(skb, tp, res);

		if (err >= 0)
			return err;
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);
1825
/* Classify @skb against chain @tp.  With CONFIG_NET_CLS_ACT this also
 * honours TC_ACT_RECLASSIFY by restarting from the chain head, bounded
 * by MAX_REC_LOOP restarts to break reclassification loops (the packet
 * is then shot and the event rate-limit logged).
 */
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tcf_proto *otp = tp;	/* chain head, for restarts */
	int limit = 0;				/* reclassify loop counter */
reclassify:
#endif

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		tp = otp;

		if (unlikely(limit++ >= MAX_REC_LOOP)) {
			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
					       tp->q->ops->id,
					       tp->prio & 0xffff,
					       ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854
Cong Wang1e052be2015-03-06 11:47:59 -08001855bool tcf_destroy(struct tcf_proto *tp, bool force)
Patrick McHardya48b5a62007-03-23 11:29:43 -07001856{
Cong Wang1e052be2015-03-06 11:47:59 -08001857 if (tp->ops->destroy(tp, force)) {
1858 module_put(tp->ops->owner);
1859 kfree_rcu(tp, rcu);
1860 return true;
1861 }
1862
1863 return false;
Patrick McHardya48b5a62007-03-23 11:29:43 -07001864}
1865
John Fastabend25d8c0d2014-09-12 20:05:27 -07001866void tcf_destroy_chain(struct tcf_proto __rcu **fl)
Patrick McHardya48b5a62007-03-23 11:29:43 -07001867{
1868 struct tcf_proto *tp;
1869
John Fastabend25d8c0d2014-09-12 20:05:27 -07001870 while ((tp = rtnl_dereference(*fl)) != NULL) {
1871 RCU_INIT_POINTER(*fl, tp->next);
Cong Wang1e052be2015-03-06 11:47:59 -08001872 tcf_destroy(tp, true);
Patrick McHardya48b5a62007-03-23 11:29:43 -07001873 }
1874}
1875EXPORT_SYMBOL(tcf_destroy_chain);
1876
#ifdef CONFIG_PROC_FS
/* Show /proc/net/psched: four hex fields describing the scheduler's
 * clock resolution and tick<->time conversion parameters.
 */
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Create the per-namespace /proc/net/psched entry. */
static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
/* No procfs configured: per-netns hooks become no-ops. */
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif
1929
/* Hooked into net-namespace creation/teardown for /proc/net/psched. */
static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
1934
/* Subsystem init: register the per-netns proc hooks, the built-in
 * fifo/mq qdiscs, and the rtnetlink handlers for qdisc and class
 * messages.  Runs at subsys_initcall time.
 */
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);

	return 0;
}

subsys_initcall(pktsched_init);