/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>

/*
 * classid	class		marking
 * -------	-----		-------
 *   n/a	  0		n/a
 *   x:0	  1		use entry [0]
 *   ...	 ...		...
 *   x:y y>0	 y+1		use entry [y]
 *   ...	 ...		...
 * x:indices-1	indices		use entry [indices-1]
 *   ...	 ...		...
 *   x:y	 y+1		use entry [y & (indices-1)]
 *   ...	 ...		...
 * 0xffff	0x10000		use entry [indices-1]
 */
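/*
 * Worked example of the table above, for a qdisc created with indices = 16:
 * classid x:5 is class 6 and is re-marked via mask/value entry [5], while a
 * tc_index of 0x123 wraps to entry [0x123 & 15] = [3] on dequeue.
 *
 * Illustrative userspace setup (a sketch only; the parameter names follow
 * the TCA_DSMARK_* attributes handled below, but exact tc syntax depends on
 * the iproute2 version in use):
 *
 *	tc qdisc add dev eth0 handle 1:0 root dsmark \
 *		indices 16 default_index 0 set_tc_index
 *	tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * With that class, a packet dequeued with tc_index 1 gets its DS field
 * rewritten to (old & 0x3) | 0xb8, i.e. DSCP EF with the ECN bits preserved.
 */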


#define NO_DEFAULT_INDEX	(1 << 16)

struct mask_value {
	u8			mask;
	u8			value;
};

struct dsmark_qdisc_data {
	struct Qdisc		*q;
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;
	struct mask_value	*mv;
	u16			indices;
	u8			set_tc_index;
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};

static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	return index <= p->indices && index > 0;
}

/* ------------------------- Class/flow operations ------------------------- */

static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
		 __func__, sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &p->q);
	return 0;
}

static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	return p->q;
}

static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
		 __func__, sch, qdisc_priv(sch), classid);

	return dsmark_find(sch, classid);
}

static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};

static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;

	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
		 __func__, sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	if (tb[TCA_DSMARK_VALUE])
		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	err = 0;

errout:
	return err;
}

static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	if (!dsmark_valid_index(p, arg))
		return -EINVAL;

	p->mv[arg - 1].mask = 0xff;
	p->mv[arg - 1].value = 0;

	return 0;
}

static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
		 __func__, sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		if (p->mv[i].mask == 0xff && !p->mv[i].value)
			goto ignore;
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}

static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return p->block;
}

/* --------------------------- Qdisc operations ---------------------------- */

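/*
 * Enqueue: with set_tc_index, copy the packet's DS field (minus the ECN
 * bits) into skb->tc_index, then let skb->priority or the attached
 * classifiers pick the final tc_index used for re-marking on dequeue.
 */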
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		int wlen = skb_network_offset(skb);

		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tcf_classify(skb, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

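/*
 * Dequeue: pull a packet from the inner qdisc and rewrite its DS field
 * using the mask/value entry selected by skb->tc_index (masked to the
 * table size).
 */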
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	skb = qdisc_dequeue_peeked(p->q);
	if (skb == NULL)
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mv[index].mask != 0xff || p->mv[index].value)
			pr_warn("%s: unsupported protocol %d\n",
				__func__, ntohs(tc_skb_protocol(skb)));
		break;
	}

	return skb;
}

static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	return p->q->ops->peek(p->q);
}

static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	int i;

	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

	if (!opt)
		goto errout;

	err = tcf_block_get(&p->block, &p->filter_list, sch);
	if (err)
		return err;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	if (hweight32(indices) != 1)
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	if (indices <= DSMARK_EMBEDDED_SZ)
		p->mv = p->embedded;
	else
		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
	if (!p->mv) {
		err = -ENOMEM;
		goto errout;
	}
	for (i = 0; i < indices; i++) {
		p->mv[i].mask = 0xff;
		p->mv[i].value = 0;
	}
	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
	if (p->q == NULL)
		p->q = &noop_qdisc;
	else
		qdisc_hash_add(p->q, true);

	pr_debug("%s: qdisc %p\n", __func__, p->q);

	err = 0;
errout:
	return err;
}

static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
	qdisc_reset(p->q);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	tcf_block_put(p->block);
	qdisc_destroy(p->q);
	if (p->mv != p->embedded)
		kfree(p->mv);
}

static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("%s(sch %p,[qdisc %p],class %ld)\n", __func__, sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
		goto nla_put_failure;

	if (p->default_index != NO_DEFAULT_INDEX &&
	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
		goto nla_put_failure;

	if (p->set_tc_index &&
	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		= dsmark_graft,
	.leaf		= dsmark_leaf,
	.find		= dsmark_find,
	.change		= dsmark_change,
	.delete		= dsmark_delete,
	.walk		= dsmark_walk,
	.tcf_block	= dsmark_tcf_block,
	.bind_tcf	= dsmark_bind_filter,
	.unbind_tcf	= dsmark_unbind_filter,
	.dump		= dsmark_dump_class,
};

static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &dsmark_class_ops,
	.id		= "dsmark",
	.priv_size	= sizeof(struct dsmark_qdisc_data),
	.enqueue	= dsmark_enqueue,
	.dequeue	= dsmark_dequeue,
	.peek		= dsmark_peek,
	.init		= dsmark_init,
	.reset		= dsmark_reset,
	.destroy	= dsmark_destroy,
	.change		= NULL,
	.dump		= dsmark_dump,
	.owner		= THIS_MODULE,
};

static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");