blob: 604767482ad07ec3607599f6387ff02cc26780ad [file] [log] [blame]
Alexander Duyck92651942008-09-12 16:29:34 -07001/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
Jeff Kirsherc057b192013-12-06 09:13:44 -080014 * this program; if not, see <http://www.gnu.org/licenses/>.
Alexander Duyck92651942008-09-12 16:29:34 -070015 *
16 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
17 */
18
19#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090020#include <linux/slab.h>
Alexander Duyck92651942008-09-12 16:29:34 -070021#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/skbuff.h>
26#include <net/netlink.h>
27#include <net/pkt_sched.h>
Jiri Pirkocf1facd2017-02-09 14:38:56 +010028#include <net/pkt_cls.h>
Alexander Duyck92651942008-09-12 16:29:34 -070029
/* Per-qdisc private state for the multiq scheduler. */
struct multiq_sched_data {
	u16 bands;		/* active bands; tracks dev->real_num_tx_queues */
	u16 max_bands;		/* size of queues[]; set from dev->num_tx_queues */
	u16 curband;		/* last band serviced, for round-robin dequeue */
	struct tcf_proto __rcu *filter_list;	/* attached classifier chain */
	struct tcf_block *block;		/* block backing filter_list */
	struct Qdisc **queues;	/* one child qdisc per band */
};
38
39
/* Select the child qdisc (band) for @skb.
 *
 * Runs the attached classifiers first: a STOLEN/QUEUED/SHOT verdict
 * consumes the packet, so NULL is returned with *qerr set accordingly.
 * Otherwise the band comes from the skb's queue mapping, falling back
 * to band 0 when the mapping is out of range.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through -- stolen/queued packets also return NULL */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}
67
68static int
Eric Dumazet520ac302016-06-21 23:16:49 -070069multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
70 struct sk_buff **to_free)
Alexander Duyck92651942008-09-12 16:29:34 -070071{
72 struct Qdisc *qdisc;
73 int ret;
74
75 qdisc = multiq_classify(skb, sch, &ret);
76#ifdef CONFIG_NET_CLS_ACT
77 if (qdisc == NULL) {
78
79 if (ret & __NET_XMIT_BYPASS)
John Fastabend25331d62014-09-28 11:53:29 -070080 qdisc_qstats_drop(sch);
Eric Dumazet520ac302016-06-21 23:16:49 -070081 __qdisc_drop(skb, to_free);
Alexander Duyck92651942008-09-12 16:29:34 -070082 return ret;
83 }
84#endif
85
Eric Dumazet520ac302016-06-21 23:16:49 -070086 ret = qdisc_enqueue(skb, qdisc, to_free);
Alexander Duyck92651942008-09-12 16:29:34 -070087 if (ret == NET_XMIT_SUCCESS) {
Alexander Duyck92651942008-09-12 16:29:34 -070088 sch->q.qlen++;
89 return NET_XMIT_SUCCESS;
90 }
91 if (net_xmit_drop_count(ret))
John Fastabend25331d62014-09-28 11:53:29 -070092 qdisc_qstats_drop(sch);
Alexander Duyck92651942008-09-12 16:29:34 -070093 return ret;
94}
95
Alexander Duyck92651942008-09-12 16:29:34 -070096static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
97{
98 struct multiq_sched_data *q = qdisc_priv(sch);
99 struct Qdisc *qdisc;
100 struct sk_buff *skb;
101 int band;
102
103 for (band = 0; band < q->bands; band++) {
104 /* cycle through bands to ensure fairness */
105 q->curband++;
106 if (q->curband >= q->bands)
107 q->curband = 0;
108
109 /* Check that target subqueue is available before
Jarek Poplawskif30ab412008-11-13 22:56:30 -0800110 * pulling an skb to avoid head-of-line blocking.
Alexander Duyck92651942008-09-12 16:29:34 -0700111 */
Tom Herbert734664982011-11-28 16:32:44 +0000112 if (!netif_xmit_stopped(
113 netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
Alexander Duyck92651942008-09-12 16:29:34 -0700114 qdisc = q->queues[q->curband];
115 skb = qdisc->dequeue(qdisc);
116 if (skb) {
Eric Dumazet9190b3b2011-01-20 23:31:33 -0800117 qdisc_bstats_update(sch, skb);
Alexander Duyck92651942008-09-12 16:29:34 -0700118 sch->q.qlen--;
119 return skb;
120 }
121 }
122 }
123 return NULL;
124
125}
126
Jarek Poplawski8e3af972008-10-31 00:45:55 -0700127static struct sk_buff *multiq_peek(struct Qdisc *sch)
128{
129 struct multiq_sched_data *q = qdisc_priv(sch);
130 unsigned int curband = q->curband;
131 struct Qdisc *qdisc;
132 struct sk_buff *skb;
133 int band;
134
135 for (band = 0; band < q->bands; band++) {
136 /* cycle through bands to ensure fairness */
137 curband++;
138 if (curband >= q->bands)
139 curband = 0;
140
141 /* Check that target subqueue is available before
Jarek Poplawskif30ab412008-11-13 22:56:30 -0800142 * pulling an skb to avoid head-of-line blocking.
Jarek Poplawski8e3af972008-10-31 00:45:55 -0700143 */
Tom Herbert734664982011-11-28 16:32:44 +0000144 if (!netif_xmit_stopped(
145 netdev_get_tx_queue(qdisc_dev(sch), curband))) {
Jarek Poplawski8e3af972008-10-31 00:45:55 -0700146 qdisc = q->queues[curband];
147 skb = qdisc->ops->peek(qdisc);
148 if (skb)
149 return skb;
150 }
151 }
152 return NULL;
153
154}
155
Alexander Duyck92651942008-09-12 16:29:34 -0700156static void
157multiq_reset(struct Qdisc *sch)
158{
159 u16 band;
160 struct multiq_sched_data *q = qdisc_priv(sch);
161
162 for (band = 0; band < q->bands; band++)
163 qdisc_reset(q->queues[band]);
164 sch->q.qlen = 0;
165 q->curband = 0;
166}
167
168static void
169multiq_destroy(struct Qdisc *sch)
170{
171 int band;
172 struct multiq_sched_data *q = qdisc_priv(sch);
173
Jiri Pirko6529eab2017-05-17 11:07:55 +0200174 tcf_block_put(q->block);
Alexander Duyck92651942008-09-12 16:29:34 -0700175 for (band = 0; band < q->bands; band++)
176 qdisc_destroy(q->queues[band]);
177
178 kfree(q->queues);
179}
180
/* Apply TCA_OPTIONS to the qdisc (also invoked from multiq_init()).
 *
 * The active band count always tracks the device's real number of TX
 * queues; the value userspace supplied is overwritten before use.
 * Phase 1 (under the tree lock): retire children of bands that no
 * longer exist, replacing them with noop_qdisc.  Phase 2: give every
 * active band a default pfifo child if it still has noop_qdisc --
 * allocation happens outside the lock, only the swap is locked.
 */
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	/* bands is dictated by the device, not by the caller */
	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			/* drop the child's backlog from parent counters */
			qdisc_tree_reduce_backlog(child, child->q.qlen,
						  child->qstats.backlog);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;
				if (child != &noop_qdisc)
					qdisc_hash_add(child, true);

				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
236
237static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
238{
239 struct multiq_sched_data *q = qdisc_priv(sch);
Alexander Duyckf07d1502008-09-12 17:57:23 -0700240 int i, err;
Alexander Duyck92651942008-09-12 16:29:34 -0700241
242 q->queues = NULL;
243
244 if (opt == NULL)
245 return -EINVAL;
246
Jiri Pirko6529eab2017-05-17 11:07:55 +0200247 err = tcf_block_get(&q->block, &q->filter_list);
248 if (err)
249 return err;
250
Alexander Duyck92651942008-09-12 16:29:34 -0700251 q->max_bands = qdisc_dev(sch)->num_tx_queues;
252
253 q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
254 if (!q->queues)
255 return -ENOBUFS;
256 for (i = 0; i < q->max_bands; i++)
257 q->queues[i] = &noop_qdisc;
258
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000259 err = multiq_tune(sch, opt);
Alexander Duyckf07d1502008-09-12 17:57:23 -0700260
261 if (err)
262 kfree(q->queues);
263
264 return err;
Alexander Duyck92651942008-09-12 16:29:34 -0700265}
266
267static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
268{
269 struct multiq_sched_data *q = qdisc_priv(sch);
270 unsigned char *b = skb_tail_pointer(skb);
271 struct tc_multiq_qopt opt;
272
273 opt.bands = q->bands;
274 opt.max_bands = q->max_bands;
275
David S. Miller1b34ec42012-03-29 05:11:39 -0400276 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
277 goto nla_put_failure;
Alexander Duyck92651942008-09-12 16:29:34 -0700278
279 return skb->len;
280
281nla_put_failure:
282 nlmsg_trim(skb, b);
283 return -1;
284}
285
286static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
287 struct Qdisc **old)
288{
289 struct multiq_sched_data *q = qdisc_priv(sch);
290 unsigned long band = arg - 1;
291
Alexander Duyck92651942008-09-12 16:29:34 -0700292 if (new == NULL)
293 new = &noop_qdisc;
294
WANG Cong86a79962016-02-25 14:55:00 -0800295 *old = qdisc_replace(sch, new, &q->queues[band]);
Alexander Duyck92651942008-09-12 16:29:34 -0700296 return 0;
297}
298
299static struct Qdisc *
300multiq_leaf(struct Qdisc *sch, unsigned long arg)
301{
302 struct multiq_sched_data *q = qdisc_priv(sch);
303 unsigned long band = arg - 1;
304
Alexander Duyck92651942008-09-12 16:29:34 -0700305 return q->queues[band];
306}
307
/* Look up a class by classid; returns the 1-based band number or 0. */
static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	/* band is 1-based; the unsigned wrap of (band - 1) when band == 0
	 * also rejects minor 0 here.
	 */
	if (band - 1 >= q->bands)
		return 0;
	return band;
}
317
318static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
319 u32 classid)
320{
321 return multiq_get(sch, classid);
322}
323
324
/* No per-class reference counting: classes are fixed, one per band. */
static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}
328
/* Fill @tcm for class @cl: minor = class id, info = child's handle. */
static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	/* cl is 1-based, queues[] is 0-based */
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}
338
/* Copy class @cl's byte/packet counters and queue stats into @d. */
static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}
353
354static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
355{
356 struct multiq_sched_data *q = qdisc_priv(sch);
357 int band;
358
359 if (arg->stop)
360 return;
361
362 for (band = 0; band < q->bands; band++) {
363 if (arg->count < arg->skip) {
364 arg->count++;
365 continue;
366 }
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000367 if (arg->fn(sch, band + 1, arg) < 0) {
Alexander Duyck92651942008-09-12 16:29:34 -0700368 arg->stop = 1;
369 break;
370 }
371 arg->count++;
372 }
373}
374
Jiri Pirko6529eab2017-05-17 11:07:55 +0200375static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
Alexander Duyck92651942008-09-12 16:29:34 -0700376{
377 struct multiq_sched_data *q = qdisc_priv(sch);
378
379 if (cl)
380 return NULL;
Jiri Pirko6529eab2017-05-17 11:07:55 +0200381 return q->block;
Alexander Duyck92651942008-09-12 16:29:34 -0700382}
383
/* Class-level operations: one class per band, ids are band + 1. */
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft = multiq_graft,
	.leaf = multiq_leaf,
	.get = multiq_get,
	.put = multiq_put,
	.walk = multiq_walk,
	.tcf_block = multiq_tcf_block,
	.bind_tcf = multiq_bind,
	.unbind_tcf = multiq_put,
	.dump = multiq_dump_class,
	.dump_stats = multiq_dump_class_stats,
};
396
/* Qdisc operations table registered under the "multiq" id. */
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next = NULL,
	.cl_ops = &multiq_class_ops,
	.id = "multiq",
	.priv_size = sizeof(struct multiq_sched_data),
	.enqueue = multiq_enqueue,
	.dequeue = multiq_dequeue,
	.peek = multiq_peek,
	.init = multiq_init,
	.reset = multiq_reset,
	.destroy = multiq_destroy,
	.change = multiq_tune,	/* runtime reconfiguration reuses tune */
	.dump = multiq_dump,
	.owner = THIS_MODULE,
};
412
/* Module entry point: register the multiq qdisc with the tc core. */
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}
417
/* Module exit point: unregister the multiq qdisc. */
static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}
422
/* Module registration boilerplate. */
module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");