/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/red.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. It means, that this scheduler
	should be used as leaf of CBQ or P3, which put interactive traffic
	to higher priority band.

	We still need true WFQ for top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits :
	- maximal queue length per flow to 127 packets.
	- max mtu to 2^18-1;
	- max 65408 flows,
	- number of hash buckets to 65536.

	It is easy to increase these values, but not in flight.  */

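/* Example (illustrative only): a typical way to attach this qdisc from
 * userspace, assuming an interface named eth0, is
 *
 *	tc qdisc add dev eth0 root handle 1: sfq perturb 10
 *
 * which rehashes flows every 10 seconds (see sfq_perturbation() below).
 */
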
#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We dont use pointers to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq RR chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */

	unsigned int	backlog;
	struct red_vars vars;
};

struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	u32		perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum;	/* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot *tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
					/* Linked lists of slots, indexed by depth
					 * dep[0] : list of unused flows
					 * dep[1] : list of flows with 1 packet
					 * dep[X] : list of flows with X packets
					 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}

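/* Flow hash: mix the skb flow hash with the per-qdisc perturbation and mask
 * it to the hash table size (q->divisor is enforced to be a power of two).
 */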
static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
}

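/* Map a packet to a 1-based slot/class index: use skb->priority when it
 * explicitly targets this qdisc, otherwise the attached filters, otherwise
 * the flow hash. A return value of 0 means the packet should be dropped.
 */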
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)


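/* sfq_dec()/sfq_inc() re-link slot x into the dep[] list that matches its new
 * queue length and keep q->cur_depth (length of the longest flow) up to date.
 */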
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

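/* Drop one packet when the qdisc is over limit: take it from the tail (or the
 * head, when headdrop is set) of the longest flow; if every active flow holds
 * a single packet, drop from the flow after the current round-robin tail.
 * Returns the dropped packet length, or 0 if nothing could be dropped.
 */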
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_drop(sch);
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

/* Is ECN parameter configured */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max threshold just be marked */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}

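/* Enqueue: classify the packet to a slot, allocating a free slot for a new
 * flow, optionally run per-flow RED marking/dropping, enforce the per-flow
 * depth limit (tail-drop or head-drop), link new flows at the end of the
 * round-robin ring, and drop from the longest flow once the qdisc limit is
 * exceeded.
 */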
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could endup servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen) {
		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
		return NET_XMIT_CN;
	}

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}

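/* Dequeue using deficit round robin over the active slots: a slot whose
 * (scaled) allot is exhausted is moved to the back of the ring and refilled
 * with q->scaled_quantum before the next slot is tried.
 */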
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}

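/* Perturbation timer: pick a new hash perturbation so that flows sharing a
 * bucket get separated over time, and rehash the packets already queued
 * (skipped when filters are attached, since they bypass the default hash).
 */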
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = prandom_u32();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

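/* Apply a netlink configuration (legacy tc_sfq_qopt or the larger
 * tc_sfq_qopt_v1): update quantum, perturbation period, divisor, flow/depth
 * limits and optional RED/ECN parameters, then trim the queue to the new
 * limit.
 */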
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen, dropped = 0;
	struct red_parms *p = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
					ctl_v1->Wlog))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		dropped += sfq_drop(sch);
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = prandom_u32();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}

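/* The hash table and the flows table can be large (divisor/maxflows entries),
 * so try kmalloc() first and quietly fall back to vmalloc().
 */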
static void *sfq_alloc(size_t sz)
{
	void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vmalloc(sz);
	return ptr;
}

static void sfq_free(void *addr)
{
	kvfree(addr);
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}

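/* Set defaults (SFQ_DEFAULT_FLOWS flows, SFQ_DEFAULT_HASH_DIVISOR buckets,
 * quantum = device MTU), apply an optional netlink config, then allocate the
 * hash and flows tables and link every slot into the free list (dep[0]).
 */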
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = prandom_u32();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* Note: sfq_destroy() will be called by our caller */
		return -ENOMEM;
	}

	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

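/* Dump the current configuration as a tc_sfq_qopt_v1, whose leading v0 member
 * keeps the layout compatible with the legacy tc_sfq_qopt.
 */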
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch,
					     unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		qs.backlog = slot->backlog;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

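/* SFQ exposes its hash buckets as pseudo-classes 1..divisor, so per-bucket
 * qlen/backlog/allot can be reported by sfq_dump_class_stats().
 */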
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");