blob: f43c8f33f09ef60e0f2a49ffd62f03624798c61b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
Patrick McHardy41794772007-03-16 01:19:15 -070029#include <linux/hrtimer.h>
Jarek Poplawski25bfcd52008-08-18 20:53:34 -070030#include <linux/lockdep.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020033#include <net/net_namespace.h>
Denis V. Lunevb8542722007-12-01 00:21:31 +110034#include <net/sock.h>
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -070035#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <net/pkt_sched.h>
37
Tom Goff7316ae82010-03-19 15:40:13 +000038static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 struct Qdisc *old, struct Qdisc *new);
Tom Goff7316ae82010-03-19 15:40:13 +000041static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
45/*
46
47 Short review.
48 -------------
49
50 This file consists of two interrelated parts:
51
52 1. queueing disciplines manager frontend.
53 2. traffic classes manager frontend.
54
55 Generally, queueing discipline ("qdisc") is a black box,
56 which is able to enqueue packets and to dequeue them (when
57 device is ready to send something) in order and at times
58 determined by algorithm hidden in it.
59
   qdiscs are divided into two categories:
61 - "queues", which have no internal structure visible from outside.
62 - "schedulers", which split all the packets to "traffic classes",
63 using "packet classifiers" (look at cls_api.c)
64
65 In turn, classes may have child qdiscs (as rule, queues)
66 attached to them etc. etc. etc.
67
68 The goal of the routines in this file is to translate
69 information supplied by user in the form of handles
70 to more intelligible for kernel form, to make some sanity
71 checks and part of work, which is common to all qdiscs
72 and to provide rtnetlink notifications.
73
74 All real intelligent work is done inside qdisc modules.
75
76
77
78 Every discipline has two major routines: enqueue and dequeue.
79
80 ---dequeue
81
82 dequeue usually returns a skb to send. It is allowed to return NULL,
83 but it does not mean that queue is empty, it just means that
84 discipline does not want to send anything this time.
85 Queue is really empty if q->q.qlen == 0.
86 For complicated disciplines with multiple queues q->q is not
87 real packet queue, but however q->q.qlen must be valid.
88
89 ---enqueue
90
91 enqueue returns 0, if packet was enqueued successfully.
   If packet (this one or another one) was dropped, it returns
   a non-zero error code.
94 NET_XMIT_DROP - this packet dropped
95 Expected action: do not backoff, but wait until queue will clear.
96 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
97 Expected action: backoff or ignore
98 NET_XMIT_POLICED - dropped by police.
99 Expected action: backoff or error to real-time apps.
100
101 Auxiliary routines:
102
Jarek Poplawski99c0db22008-10-31 00:45:27 -0700103 ---peek
104
105 like dequeue but without removing a packet from the queue
106
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 ---reset
108
109 returns qdisc to initial state: purge all buffers, clear all
110 timers, counters (except for statistics) etc.
111
112 ---init
113
114 initializes newly created qdisc.
115
116 ---destroy
117
118 destroys resources allocated by init and during lifetime of qdisc.
119
120 ---change
121
122 changes qdisc parameters.
123 */
124
125/* Protects list of registered TC modules. It is pure SMP lock. */
126static DEFINE_RWLOCK(qdisc_mod_lock);
127
128
129/************************************************
130 * Queueing disciplines manipulation. *
131 ************************************************/
132
133
134/* The list of all installed queueing disciplines. */
135
136static struct Qdisc_ops *qdisc_base;
137
Zhi Yong Wu21eb2182014-01-01 04:34:51 +0800138/* Register/unregister queueing discipline */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139
/* Register a qdisc type with the packet scheduler core.
 *
 * Returns 0 on success, -EEXIST if ops with the same id is already
 * registered, or -EINVAL if the ops table is inconsistent.
 */
int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	/* Reject duplicate ids; on fall-through qp points at the tail link. */
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	/* Fill in missing callbacks with the noop defaults. */
	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		/* Defaulting peek is only valid when dequeue is absent too. */
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		/* Classful qdiscs must provide the basic class callbacks. */
		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		/* A filter chain requires the bind/unbind pair as well. */
		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	/* Append to the global list of registered qdisc types. */
	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
184int unregister_qdisc(struct Qdisc_ops *qops)
185{
186 struct Qdisc_ops *q, **qp;
187 int err = -ENOENT;
188
189 write_lock(&qdisc_mod_lock);
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 if (q == qops)
192 break;
193 if (q) {
194 *qp = q->next;
195 q->next = NULL;
196 err = 0;
197 }
198 write_unlock(&qdisc_mod_lock);
199 return err;
200}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800201EXPORT_SYMBOL(unregister_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
stephen hemminger6da7c8f2013-08-27 16:19:08 -0700203/* Get default qdisc if not otherwise specified */
/* Get default qdisc if not otherwise specified */
/* Copy the id of the current default qdisc into @name (at most @len
 * bytes, always NUL-terminated), under the module list lock.
 */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}
210
211static struct Qdisc_ops *qdisc_lookup_default(const char *name)
212{
213 struct Qdisc_ops *q = NULL;
214
215 for (q = qdisc_base; q; q = q->next) {
216 if (!strcmp(name, q->id)) {
217 if (!try_module_get(q->owner))
218 q = NULL;
219 break;
220 }
221 }
222
223 return q;
224}
225
226/* Set new default qdisc to use */
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		/* Re-check: the module may have registered while unlocked. */
		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default; release the reference held on the old one. */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}
254
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255/* We know handle. Find qdisc among all qdisc's attached to device
256 (root qdisc, all its children, children of children etc.)
257 */
258
/* Return the qdisc with handle @handle among @root and the qdiscs
 * linked on root->list, or NULL if none matches.  The root itself is
 * only considered when it is not a builtin qdisc.
 */
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}
273
/* Link @q onto the list hanging off the device's root qdisc so it can
 * be found by qdisc_lookup().  Root and ingress qdiscs are not listed.
 */
void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		struct Qdisc *root = qdisc_dev(q)->qdisc;

		/* Listing under the shared noop root would be a bug. */
		WARN_ON_ONCE(root == &noop_qdisc);
		list_add_tail(&q->list, &root->list);
	}
}
EXPORT_SYMBOL(qdisc_list_add);
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700284
285void qdisc_list_del(struct Qdisc *q)
286{
Jarek Poplawskif6486d42008-11-25 13:56:06 -0800287 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700288 list_del(&q->list);
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700289}
290EXPORT_SYMBOL(qdisc_list_del);
291
David S. Milleread81cc2008-07-17 00:50:32 -0700292struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
Patrick McHardy43effa12006-11-29 17:35:48 -0800293{
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700294 struct Qdisc *q;
295
Patrick McHardyaf356af2009-09-04 06:41:18 +0000296 q = qdisc_match_from_root(dev->qdisc, handle);
297 if (q)
298 goto out;
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700299
Eric Dumazet24824a02010-10-02 06:11:55 +0000300 if (dev_ingress_queue(dev))
301 q = qdisc_match_from_root(
302 dev_ingress_queue(dev)->qdisc_sleeping,
303 handle);
Jarek Poplawskif6486d42008-11-25 13:56:06 -0800304out:
Jarek Poplawskif6e0b232008-08-22 03:24:05 -0700305 return q;
Patrick McHardy43effa12006-11-29 17:35:48 -0800306}
307
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
309{
310 unsigned long cl;
311 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -0800312 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313
314 if (cops == NULL)
315 return NULL;
316 cl = cops->get(p, classid);
317
318 if (cl == 0)
319 return NULL;
320 leaf = cops->leaf(p, cl);
321 cops->put(p, cl);
322 return leaf;
323}
324
325/* Find queueing discipline by name */
326
Patrick McHardy1e904742008-01-22 22:11:17 -0800327static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328{
329 struct Qdisc_ops *q = NULL;
330
331 if (kind) {
332 read_lock(&qdisc_mod_lock);
333 for (q = qdisc_base; q; q = q->next) {
Patrick McHardy1e904742008-01-22 22:11:17 -0800334 if (nla_strcmp(kind, q->id) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 if (!try_module_get(q->owner))
336 q = NULL;
337 break;
338 }
339 }
340 read_unlock(&qdisc_mod_lock);
341 }
342 return q;
343}
344
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +0200345/* The linklayer setting were not transferred from iproute2, in older
346 * versions, and the rate tables lookup systems have been dropped in
347 * the kernel. To keep backward compatible with older iproute2 tc
348 * utils, we detect the linklayer setting by detecting if the rate
349 * table were modified.
350 *
351 * For linklayer ATM table entries, the rate table will be aligned to
352 * 48 bytes, thus some table entries will contain the same value. The
353 * mpu (min packet unit) is also encoded into the old rate table, thus
354 * starting from the mpu, we find low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate tables have been modified for linklayer ATM.
357 *
358 * This is done by rounding mpu to the nearest 48 bytes cell/entry,
359 * and then roundup to the next cell, calc the table entry one below,
360 * and compare.
361 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	/* Round mpu to the nearest 48-byte cell and to the next one. */
	int low = roundup(r->mpu, 48);
	int high = roundup(low+1, 48);
	int cell_low = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	/* Equal entries for two distinct cells mean the table was
	 * 48-byte aligned, i.e. built for linklayer ATM.
	 */
	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
383
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384static struct qdisc_rate_table *qdisc_rtab_list;
385
/* Look up or create a rate table for @r with the 1024-byte table data
 * in attribute @tab.  Identical tables are shared via refcount.
 * Returns NULL on malformed input or allocation failure.
 */
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	/* Reuse a cached table whose spec and data match exactly. */
	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		/* Older iproute2 never sets linklayer; recover it from
		 * the table layout (see __detect_linklayer above).
		 */
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415
416void qdisc_put_rtab(struct qdisc_rate_table *tab)
417{
418 struct qdisc_rate_table *rtab, **rtabp;
419
420 if (!tab || --tab->refcnt)
421 return;
422
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000423 for (rtabp = &qdisc_rtab_list;
424 (rtab = *rtabp) != NULL;
425 rtabp = &rtab->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (rtab == tab) {
427 *rtabp = rtab->next;
428 kfree(rtab);
429 return;
430 }
431 }
432}
Patrick McHardy62e3ba12008-01-22 22:10:23 -0800433EXPORT_SYMBOL(qdisc_put_rtab);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700435static LIST_HEAD(qdisc_stab_list);
436static DEFINE_SPINLOCK(qdisc_stab_lock);
437
438static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
439 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
440 [TCA_STAB_DATA] = { .type = NLA_BINARY },
441};
442
/* Parse a TCA_STAB attribute and return a (possibly shared) size
 * table.  Returns an ERR_PTR on parse error, inconsistent sizes, or
 * allocation failure.
 */
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	/* The declared size must match the data actually supplied. */
	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	/* Share an existing table with identical spec and data. */
	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	/* Allocate outside the spinlock (GFP_KERNEL may sleep). */
	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}
499
/* RCU callback: free the size table that embeds @head. */
static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}
504
/* Drop a reference on a size table; on the last put, unlink it and
 * free it after an RCU-bh grace period (readers may still use it).
 */
void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
520
521static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
522{
523 struct nlattr *nest;
524
525 nest = nla_nest_start(skb, TCA_STAB);
Patrick McHardy3aa46142008-11-20 04:07:14 -0800526 if (nest == NULL)
527 goto nla_put_failure;
David S. Miller1b34ec42012-03-29 05:11:39 -0400528 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
529 goto nla_put_failure;
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700530 nla_nest_end(skb, nest);
531
532 return skb->len;
533
534nla_put_failure:
535 return -1;
536}
537
/* Compute the size-table-adjusted packet length for @skb and store it
 * in the qdisc cb.  The result is clamped to at least 1.
 */
void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		/* Beyond the table: extrapolate from the last entry. */
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
Jussi Kivilinna175f9c12008-07-20 00:08:47 -0700565
Florian Westphal6e765a02014-06-11 20:35:18 +0200566void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
Jarek Poplawskib00355d2009-02-01 01:12:42 -0800567{
568 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000569 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
570 txt, qdisc->ops->id, qdisc->handle >> 16);
Jarek Poplawskib00355d2009-02-01 01:12:42 -0800571 qdisc->flags |= TCQ_F_WARN_NONWC;
572 }
573}
574EXPORT_SYMBOL(qdisc_warn_nonwc);
575
/* hrtimer callback: clear the throttled state and kick the root qdisc
 * so the device resumes dequeueing.
 */
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	qdisc_unthrottled(wd->qdisc);
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}
588
/* Initialize a qdisc watchdog: an absolute, CPU-pinned monotonic
 * hrtimer that fires qdisc_watchdog() for @qdisc.
 */
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);
596
/* Arm the watchdog to fire at absolute time @expires (nanoseconds,
 * monotonic clock), optionally marking the qdisc throttled.  A no-op
 * while the root qdisc is deactivated, to avoid re-arming during
 * teardown.
 */
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (throttle)
		qdisc_throttled(wd->qdisc);

	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
Patrick McHardy41794772007-03-16 01:19:15 -0700611
/* Cancel a pending watchdog timer and clear the throttled state. */
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700618
Adrian Bunka94f7792008-07-22 14:20:11 -0700619static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
Patrick McHardy6fe1c7a2008-07-05 23:21:31 -0700620{
621 unsigned int size = n * sizeof(struct hlist_head), i;
622 struct hlist_head *h;
623
624 if (size <= PAGE_SIZE)
625 h = kmalloc(size, GFP_KERNEL);
626 else
627 h = (struct hlist_head *)
628 __get_free_pages(GFP_KERNEL, get_order(size));
629
630 if (h != NULL) {
631 for (i = 0; i < n; i++)
632 INIT_HLIST_HEAD(&h[i]);
633 }
634 return h;
635}
636
637static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
638{
639 unsigned int size = n * sizeof(struct hlist_head);
640
641 if (size <= PAGE_SIZE)
642 kfree(h);
643 else
644 free_pages((unsigned long)h, get_order(size));
645}
646
/* Double the class hash table when the load factor exceeds 0.75 and
 * rehash every class into the new buckets.  The swap happens under the
 * qdisc tree lock; on allocation failure the old table is kept.
 */
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		/* _safe variant: hlist_add_head unlinks cl from ohash. */
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	/* Old table is unreachable once the swap is published. */
	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
682
683int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
684{
685 unsigned int size = 4;
686
687 clhash->hash = qdisc_class_hash_alloc(size);
688 if (clhash->hash == NULL)
689 return -ENOMEM;
690 clhash->hashsize = size;
691 clhash->hashmask = size - 1;
692 clhash->hashelems = 0;
693 return 0;
694}
695EXPORT_SYMBOL(qdisc_class_hash_init);
696
/* Release the bucket array of a class hash (entries must already be
 * gone; only the table itself is freed).
 */
void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);
702
703void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
704 struct Qdisc_class_common *cl)
705{
706 unsigned int h;
707
708 INIT_HLIST_NODE(&cl->hnode);
709 h = qdisc_class_hash(cl->classid, clhash->hashmask);
710 hlist_add_head(&cl->hnode, &clhash->hash[h]);
711 clhash->hashelems++;
712}
713EXPORT_SYMBOL(qdisc_class_hash_insert);
714
715void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
716 struct Qdisc_class_common *cl)
717{
718 hlist_del(&cl->hnode);
719 clhash->hashelems--;
720}
721EXPORT_SYMBOL(qdisc_class_hash_remove);
722
Eric Dumazetfa0f5aa2012-01-03 00:00:11 +0000723/* Allocate an unique handle from space managed by kernel
724 * Possible range is [8000-FFFF]:0000 (0x8000 values)
725 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;	/* try each candidate major number at most once */
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		/* Wrap before colliding with the reserved TC_H_ROOT major. */
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	/* All 0x8000 candidates are in use. */
	return 0;
}
742
/* Propagate a decrease of @n queued packets from @sch up through all
 * ancestor qdiscs: each parent's qlen is reduced, its drop counter is
 * charged, and classful parents get a qlen_notify for the class so
 * they can deactivate it if now empty.
 */
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0)
		return;
	drops = max_t(int, n, 0);
	while ((parentid = sch->parent)) {
		/* Ingress qdiscs keep no parent queue accounting. */
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			/* Only the root may legitimately have no parent qdisc. */
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		__qdisc_qstats_drop(sch, drops);
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773
Tom Goff7316ae82010-03-19 15:40:13 +0000774static void notify_and_destroy(struct net *net, struct sk_buff *skb,
775 struct nlmsghdr *n, u32 clid,
David S. Miller99194cf2008-07-17 04:54:10 -0700776 struct Qdisc *old, struct Qdisc *new)
777{
778 if (new || old)
Tom Goff7316ae82010-03-19 15:40:13 +0000779 qdisc_notify(net, skb, n, clid, old, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780
David S. Miller4d8863a2008-08-18 21:03:15 -0700781 if (old)
David S. Miller99194cf2008-07-17 04:54:10 -0700782 qdisc_destroy(old);
David S. Miller99194cf2008-07-17 04:54:10 -0700783}
784
785/* Graft qdisc "new" to class "classid" of qdisc "parent" or
786 * to device "dev".
787 *
788 * When appropriate send a netlink notification using 'skb'
789 * and "n".
790 *
791 * On success, destroy old qdisc.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792 */
793
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		/* Grafting at device level (root or ingress). */
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			/* Ingress: exactly one queue, which must exist. */
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		/* Qdiscs with ->attach distribute themselves per-queue. */
		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			/* One reference per tx queue beyond the first. */
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		/* Grafting into a class of a classful parent qdisc. */
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}
868
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;	/* lock class for egress qdisc locks */
static struct lock_class_key qdisc_rx_lock;	/* lock class for ingress qdisc locks */
872
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt: TCA_KIND selects the Qdisc_ops
   (autoloading the sch_<kind> module if necessary), TCA_OPTIONS is
   forwarded to ops->init, TCA_STAB installs a size table and TCA_RATE
   a rate estimator.  Returns the new qdisc, or NULL with *errp set to
   a negative errno.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	/* Unknown kind: attempt to autoload the matching scheduler module. */
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		/* All ingress qdiscs share the fixed ffff:0000 handle and
		 * get the RX lockdep class. */
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			/* No handle requested by userspace: pick a free one. */
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		/* Qdiscs that keep per-CPU statistics need their counter
		 * arrays allocated before they see any traffic. */
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			/* Rate estimators are not supported on MQ roots. */
			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						root_lock,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	/* NOTE(review): this dev_put() presumably pairs with a reference
	 * taken during allocation — confirm against qdisc_alloc(). */
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
1017
/* Apply a change request to an existing qdisc: forward TCA_OPTIONS to
 * ops->change, swap in the (possibly absent) new size table, and replace
 * the rate estimator.  Returns 0, or a negative errno from ops->change
 * or size-table parsing.
 */
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		/* A qdisc without a change op cannot be reconfigured. */
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	/* Publish the new stab (NULL when none was supplied) and release
	 * the old one; rcu_assign_pointer() orders the store for readers. */
	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
1055
/* Walker state used by check_loop()/check_loop_fn() to detect whether
 * grafting qdisc @p below another qdisc would create a cycle. */
struct check_loop_arg {
	struct qdisc_walker w;	/* embedded walker; first member so the
				 * callback can upcast from the *w it gets */
	struct Qdisc *p;	/* qdisc being grafted (the loop target) */
	int depth;		/* current recursion depth */
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1063
1064static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1065{
1066 struct check_loop_arg arg;
1067
1068 if (q->ops->cl_ops == NULL)
1069 return 0;
1070
1071 arg.w.stop = arg.w.skip = arg.w.count = 0;
1072 arg.w.fn = check_loop_fn;
1073 arg.depth = depth;
1074 arg.p = p;
1075 q->ops->cl_ops->walk(q, &arg.w);
1076 return arg.w.stop ? -ELOOP : 0;
1077}
1078
1079static int
1080check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1081{
1082 struct Qdisc *leaf;
Eric Dumazet20fea082007-11-14 01:44:41 -08001083 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1085
1086 leaf = cops->leaf(q, cl);
1087 if (leaf) {
1088 if (leaf == arg->p || arg->depth > 7)
1089 return -ELOOP;
1090 return check_loop(leaf, arg->p, arg->depth + 1);
1091 }
1092 return 0;
1093}
1094
/*
 * Delete/get qdisc.
 *
 * Handles RTM_DELQDISC and RTM_GETQDISC: locate the qdisc addressed by
 * ifindex + parent (tcm_parent) and/or handle (tcm_handle), then either
 * graft NULL over it (delete) or notify it back to userspace (get).
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	/* Deletion requires admin capability; plain gets do not. */
	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				/* Normal case: resolve parent, then the
				 * child qdisc attached to that class. */
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		/* If a handle was also given it must match what we found. */
		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		/* No parent given: address the qdisc by handle alone. */
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		/* Grafting NULL in q's place deletes it (and notifies). */
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
1163
/*
 * Create/change qdisc.
 *
 * Handles RTM_NEWQDISC: depending on the netlink flags and whether a
 * matching qdisc already exists, this either changes it in place
 * (qdisc_change), or creates a new one (qdisc_create) and grafts it
 * into the hierarchy (qdisc_graft).  A module autoload inside
 * qdisc_create is signalled with -EAGAIN and replayed from scratch.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				/* Resolve the parent qdisc and the child
				 * currently attached at class clid. */
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				/* Moving an existing qdisc: make sure the
				 * move would not create a cycle. */
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know, that some child q is already
				 * attached to this parent and have choice:
				 * either to change it or to create/graft new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, requestor wanted to say,
				 * that qdisc tcm_handle is not expected
				 * to exist, so that we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is sort of hole in API, we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft, if
				 * user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		/* Let a classful parent choose the tx queue if it can. */
		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		/* -EAGAIN: qdisc_create dropped RTNL to load a module;
		 * replay the whole request. */
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
1321
/* Fill one netlink message describing qdisc @q (kind, options, size table
 * and statistics) into @skb.  Returns skb->len on success; on overflow the
 * partial message is trimmed away and -1 is returned so a dump can resume. */
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	/* tcm_info carries the refcount in qdisc dumps. */
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	/* Qdiscs with per-CPU counters hand those to the copy helpers
	 * instead of the aggregate fields. */
	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	/* Undo everything written for this (partial) message. */
	nlmsg_trim(skb, b);
	return -1;
}
1384
Eric Dumazet53b0f082010-05-22 20:37:44 +00001385static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1386{
1387 return (q->flags & TCQ_F_BUILTIN) ? true : false;
1388}
1389
/* Send RTM_DELQDISC (for @old) and/or RTM_NEWQDISC (for @new) to the
 * RTNLGRP_TC multicast group, echoing to the requesting socket when
 * NLM_F_ECHO is set.  Built-in qdiscs are never reported. */
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		/* Flag a replacement so listeners can tell it apart from
		 * a plain add. */
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	/* Only send if at least one message was actually built. */
	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
1420
/* Dump @root and every qdisc linked on its ->list into @skb, skipping the
 * first @s_q_idx entries (the resume point of an interrupted dump).
 * *q_idx_p is advanced past the entries visited.  Returns 0 when the
 * hierarchy was fully dumped, -1 when the skb filled up mid-way. */
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	/* The root itself is entry 0 of this hierarchy. */
	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	/* Then every qdisc hanging off the root's list. */
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
1460
/* Netlink dump callback for RTM_GETQDISC: iterate over every device in
 * the namespace and dump its root and ingress qdisc hierarchies.
 * cb->args[0]/args[1] carry the device and qdisc resume indices between
 * successive dump invocations. */
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		/* The saved qdisc index only applies to the device we
		 * stopped on; start fresh for later devices. */
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	/* Record where to resume on the next invocation. */
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
1501
1502
1503
1504/************************************************
1505 * Traffic classes manipulation. *
1506 ************************************************/
1507
1508
1509
/* tc_ctl_tclass - rtnetlink handler for RTM_NEWTCLASS, RTM_DELTCLASS and
 * RTM_GETTCLASS.
 *
 * Resolves the target qdisc and class from the tcmsg parent/handle fields
 * (see the comment block below for the encoding rules), then dispatches to
 * the qdisc's class ops: change/create, delete, or notify (get).
 * Runs under RTNL; returns 0 or a negative errno.
 */
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	/* Read-only GET is unprivileged; NEW/DEL require CAP_NET_ADMIN
	 * in the netns that owns the socket.
	 */
	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/* RTNL is held, so the plain (non-refcounted) lookup is safe. */
	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT - class is root, which has no parent.
	   parent == X:0 - parent is root class.
	   parent == X:Y - parent is a node in hierarchy.
	   parent == 0:Y - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0 - generate handle from kernel pool.
	   handle == 0:Y - class is X:Y, where X:0 is qdisc.
	   handle == X:Y - clear.
	   handle == X:0 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	/* cops->get() takes a reference that is dropped via cops->put()
	 * at "out:" below.
	 */
	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		/* Class does not exist: only NEW with NLM_F_CREATE may
		 * proceed (to create it); everything else fails.
		 */
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* Create or change: delegate to the qdisc's change op. */
	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}
1641
1642
/* tc_fill_tclass - build one RTM_*TCLASS netlink message for class @cl.
 *
 * Appends a tcmsg header, the qdisc kind, the class's own attributes
 * (cl_ops->dump) and its statistics to @skb.  On any failure the skb is
 * trimmed back to its state on entry (@b) so a partial message is never
 * left behind; returns skb->len on success, -1 on failure.
 */
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	/* Dumps can iterate many classes; give the scheduler a chance. */
	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	/* Explicitly clear padding so no kernel stack data leaks to user. */
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	/* Patch the final message length now that all attributes are in. */
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
1688
/* tclass_notify - broadcast a class change (@event) to RTNLGRP_TC listeners.
 *
 * Allocates a fresh skb, fills it via tc_fill_tclass() and sends it with
 * rtnetlink_send(); the requester (@oskb's portid) gets an echo when
 * NLM_F_ECHO was set.  Returns 0 or a negative errno.
 */
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		/* Message did not fit / fill failed: drop the skb ourselves. */
		kfree_skb(skb);
		return -EINVAL;
	}

	/* rtnetlink_send() consumes the skb in all cases. */
	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
1708
/* Cookie passed through cl_ops->walk(): embeds the generic walker (must
 * be first so the callback can cast back) plus the dump destination.
 */
struct qdisc_dump_args {
	struct qdisc_walker w;		/* generic class walker state */
	struct sk_buff *skb;		/* skb being filled for the dump */
	struct netlink_callback *cb;	/* netlink dump context (seq, portid) */
};
1714
/* qdisc_class_dump - walker callback: emit one class into the dump skb.
 *
 * @arg is really a struct qdisc_dump_args (the walker is its first member).
 * Returns tc_fill_tclass()'s result; a negative value stops the walk.
 */
static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}
1722
/* tc_dump_tclass_qdisc - dump all classes of one qdisc into @skb.
 *
 * @t_p counts qdiscs visited so far in this dump; @s_t is the qdisc index
 * to resume from (cb->args[0] saved by a previous partial dump), and
 * cb->args[1] holds the class index to resume within that qdisc.
 * Returns 0 to continue with the next qdisc, -1 when the skb is full.
 */
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	/* Skip (but still count) qdiscs that are invisible, already dumped
	 * in a previous pass, classless, or filtered out by tcm_parent.
	 */
	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	/* Past the resume point: clear stale per-qdisc resume state. */
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	/* Remember how far we got, in case the dump must resume here. */
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
1751
1752static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1753 struct tcmsg *tcm, struct netlink_callback *cb,
1754 int *t_p, int s_t)
1755{
1756 struct Qdisc *q;
1757
1758 if (!root)
1759 return 0;
1760
1761 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1762 return -1;
1763
1764 list_for_each_entry(q, &root->list, list) {
1765 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1766 return -1;
1767 }
1768
1769 return 0;
1770}
1771
/* tc_dump_tclass - netlink dump entry point for RTM_GETTCLASS.
 *
 * Dumps the classes of the device's root qdisc tree and, if present, of
 * the ingress qdisc.  cb->args[0] carries the qdisc resume index across
 * partial dumps.  Returns skb->len (netlink dump convention).
 */
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	/* Malformed request: too short to contain a tcmsg. */
	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	/* Takes a device reference; released via dev_put() below. */
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	/* Save progress so the next dump call resumes at qdisc index t. */
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
1804
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 *
 * Returns the first classifier verdict >= 0, or -1 if no filter matched.
 * With CONFIG_NET_CLS_ACT, a TC_ACT_RECLASSIFY verdict (unless
 * @compat_mode) restarts the chain from the top, bounded by MAX_REC_LOOP
 * to stop misconfigured rule loops.  Runs under RCU BH read side
 * (rcu_dereference_bh on the filter list).
 */
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const struct tcf_proto *old_tp = tp;	/* chain head, for reclassify restarts */
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		/* Filters bound to ETH_P_ALL match every protocol. */
		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
			goto reset;
#endif
		if (err >= 0)
			return err;
	}

	return -1;
#ifdef CONFIG_NET_CLS_ACT
reset:
	/* Too many reclassify round-trips: drop the packet and complain. */
	if (unlikely(limit++ >= MAX_REC_LOOP)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = old_tp;
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tc_classify);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850
Cong Wang1e052be2015-03-06 11:47:59 -08001851bool tcf_destroy(struct tcf_proto *tp, bool force)
Patrick McHardya48b5a62007-03-23 11:29:43 -07001852{
Cong Wang1e052be2015-03-06 11:47:59 -08001853 if (tp->ops->destroy(tp, force)) {
1854 module_put(tp->ops->owner);
1855 kfree_rcu(tp, rcu);
1856 return true;
1857 }
1858
1859 return false;
Patrick McHardya48b5a62007-03-23 11:29:43 -07001860}
1861
/* tcf_destroy_chain - tear down a whole filter chain.
 *
 * Unlinks each filter from the RCU-protected list head @fl and destroys
 * it unconditionally (force=true).  Must be called with RTNL held
 * (rtnl_dereference).
 */
void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
	struct tcf_proto *tp;

	while ((tp = rtnl_dereference(*fl)) != NULL) {
		/* Unlink first, then free; readers see a consistent list. */
		RCU_INIT_POINTER(*fl, tp->next);
		tcf_destroy(tp, true);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);
1872
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873#ifdef CONFIG_PROC_FS
/* psched_show - emit the /proc/net/psched clock-parameter line.
 *
 * Four hex words read by userspace tc: ns per usec, ns per psched tick,
 * a legacy constant (1000000), and the hrtimer resolution in Hz.
 */
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}
1883
/* psched_open - open handler for /proc/net/psched (single-shot seq file). */
static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}
1888
/* File operations for /proc/net/psched: standard single_open seq-file glue. */
static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
Tom Goff7316ae82010-03-19 15:40:13 +00001896
/* psched_net_init - create /proc/net/psched for a new network namespace.
 * Returns 0 on success, -ENOMEM if the proc entry could not be created.
 */
static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}
1907
/* psched_net_exit - remove /proc/net/psched when a netns is torn down. */
static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
1912#else
/* !CONFIG_PROC_FS stub: nothing to set up per namespace. */
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}
1917
/* !CONFIG_PROC_FS stub: nothing to tear down per namespace. */
static void __net_exit psched_net_exit(struct net *net)
{
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921#endif
1922
/* Per-netns hooks for the psched proc entry. */
static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
1927
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928static int __init pktsched_init(void)
1929{
Tom Goff7316ae82010-03-19 15:40:13 +00001930 int err;
1931
1932 err = register_pernet_subsys(&psched_net_ops);
1933 if (err) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001934 pr_err("pktsched_init: "
Tom Goff7316ae82010-03-19 15:40:13 +00001935 "cannot initialize per netns operations\n");
1936 return err;
1937 }
1938
stephen hemminger6da7c8f2013-08-27 16:19:08 -07001939 register_qdisc(&pfifo_fast_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 register_qdisc(&pfifo_qdisc_ops);
1941 register_qdisc(&bfifo_qdisc_ops);
Hagen Paul Pfeifer57dbb2d2010-01-24 12:30:59 +00001942 register_qdisc(&pfifo_head_drop_qdisc_ops);
David S. Miller6ec1c692009-09-06 01:58:51 -07001943 register_qdisc(&mq_qdisc_ops);
Phil Sutterd66d6c32015-08-27 21:21:38 +02001944 register_qdisc(&noqueue_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
Greg Rosec7ac8672011-06-10 01:27:09 +00001946 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1947 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1948 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1949 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1950 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1951 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
Thomas Grafbe577dd2007-03-22 11:55:50 -07001952
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 return 0;
1954}
1955
1956subsys_initcall(pktsched_init);