/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

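/* Per-qdisc private data: one child qdisc pointer per hardware TX queue.
 * The array is only needed between ->init() and ->attach(); once the
 * children have been grafted onto their netdev_queues it is freed.
 */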
struct mq_sched {
	struct Qdisc		**qdiscs;
};

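/* Free any children still held in the private array. After ->attach()
 * the array is NULL and the per-queue qdiscs are owned by the TX queues
 * themselves.
 */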
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_destroy(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

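/* mq is only valid as the root qdisc of a multiqueue device. One default
 * child qdisc is created per TX queue up front so that ->attach() cannot
 * fail; minor numbers 1..num_tx_queues of the mq handle name the classes.
 */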
static int mq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)));
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

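/* Graft the pre-allocated children onto their TX queues, destroying
 * whatever was attached before, and hash the qdiscs backing currently
 * active queues so they show up in dumps. The private array is no
 * longer needed afterwards.
 */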
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif

	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

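/* The mq root keeps no statistics of its own; aggregate queue length,
 * byte/packet counters and queue statistics from all per-queue children.
 */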
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen		+= qdisc->q.qlen;
		sch->bstats.bytes	+= qdisc->bstats.bytes;
		sch->bstats.packets	+= qdisc->bstats.packets;
		sch->qstats.backlog	+= qdisc->qstats.backlog;
		sch->qstats.drops	+= qdisc->qstats.drops;
		sch->qstats.requeues	+= qdisc->qstats.requeues;
		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}
	return 0;
}

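/* Class identifiers map 1:1 onto TX queues: class n is queue n - 1. */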
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

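/* Map a tc message's parent classid to a TX queue, falling back to
 * queue 0 if the minor number does not name a valid queue.
 */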
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
	struct netdev_queue *dev_queue = mq_queue_get(sch, ntx);

	if (!dev_queue) {
		struct net_device *dev = qdisc_dev(sch);

		return netdev_get_tx_queue(dev, 0);
	}
	return dev_queue;
}

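/* Replace the child of one TX queue. The device is deactivated around
 * the graft so the old and new qdiscs are never in use at the same time.
 */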
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

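/* The leaf of a class is whatever qdisc is currently attached to its queue. */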
static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

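/* Classes exist as long as their TX queue does; "getting" one is just a
 * validity check on the minor number, there is no refcounting (mq_put()
 * below is a no-op).
 */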
static unsigned long mq_get(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static void mq_put(struct Qdisc *sch, unsigned long cl)
{
}

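/* Report a class as a direct child of the root, carrying the handle of
 * the qdisc attached to its queue.
 */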
static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

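/* Class statistics are those of the qdisc attached to the class's TX queue. */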
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

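/* Iterate over all classes, one per TX queue, honouring skip/stop/count. */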
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

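/* mq has no enqueue/dequeue of its own; it only exposes the per-queue
 * qdiscs as classes of the root qdisc.
 */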
static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.get		= mq_get,
	.put		= mq_put,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};