/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

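/* Private data of the mq root: one pre-allocated child qdisc per
 * device TX queue, handed over to the queues in mq_attach().
 */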
struct mq_sched {
	struct Qdisc		**qdiscs;
};

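/* Release any children still held in the private array; once
 * mq_attach() has handed them to the TX queues and cleared the
 * array, this returns early.
 */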
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_destroy(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

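/* Create one default qdisc per TX queue. The children are only
 * pre-allocated here so that the later attach step cannot fail;
 * each child's minor number is its TX queue index plus one.
 */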
static int mq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)));
		if (qdisc == NULL)
			goto err;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mq_destroy(sch);
	return -ENOMEM;
}

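/* Graft the pre-allocated children onto their TX queues, destroying
 * whatever was attached before. Ownership moves to the netdev queues,
 * so the private array is freed here.
 */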
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_list_add(qdisc);
#endif

	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

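/* The root qdisc never queues packets itself; report the sum of the
 * per-queue children's queue lengths and statistics instead.
 */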
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen		+= qdisc->q.qlen;
		sch->bstats.bytes	+= qdisc->bstats.bytes;
		sch->bstats.packets	+= qdisc->bstats.packets;
		sch->qstats.backlog	+= qdisc->qstats.backlog;
		sch->qstats.drops	+= qdisc->qstats.drops;
		sch->qstats.requeues	+= qdisc->qstats.requeues;
		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}
	return 0;
}

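/* Class IDs are TX queue indices offset by one; return the matching
 * netdev queue, or NULL if the class is out of range.
 */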
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

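/* Resolve the queue addressed by a tc message's parent minor number,
 * falling back to the first TX queue for out-of-range values.
 */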
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
	struct netdev_queue *dev_queue = mq_queue_get(sch, ntx);

	if (!dev_queue) {
		struct net_device *dev = qdisc_dev(sch);

		return netdev_get_tx_queue(dev, 0);
	}
	return dev_queue;
}

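/* Replace the qdisc of a single TX queue. The device is deactivated
 * around the graft so the queue's qdisc cannot be running meanwhile.
 */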
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_get(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static void mq_put(struct Qdisc *sch, unsigned long cl)
{
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

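/* Iterate over all classes, i.e. all TX queues, honouring the
 * walker's skip/count bookkeeping.
 */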
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.get		= mq_get,
	.put		= mq_put,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

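/* mq is a pure management qdisc: it defines no enqueue/dequeue
 * operations of its own; packets go directly to the per-queue
 * children grafted onto the TX queues.
 */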
struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};