/*
 * net/sched/sch_sfq.c  Stochastic Fairness Queueing discipline.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/red.h>

/*      Stochastic Fairness Queuing algorithm.
        =======================================

        Source:
        Paul E. McKenney "Stochastic Fairness Queuing",
        IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

        Paul E. McKenney "Stochastic Fairness Queuing",
        "Interworking: Research and Experience", v.2, 1991, p.113-131.


        See also:
        M. Shreedhar and George Varghese "Efficient Fair
        Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


        This is not the thing that is usually called (W)FQ nowadays.
        It does not use any timestamp mechanism, but instead
        processes queues in round-robin order.

        ADVANTAGE:

        - It is very cheap. Both CPU and memory requirements are minimal.

        DRAWBACKS:

        - "Stochastic" -> It is not 100% fair.
        When hash collisions occur, several flows are considered as one.

        - "Round-robin" -> It introduces larger delays than virtual clock
        based schemes, and should not be used for isolating interactive
        traffic from non-interactive traffic. This means the scheduler
        should be used as a leaf of CBQ or P3, which put interactive
        traffic into a higher priority band.

        We still need true WFQ for the top level CSZ, but using WFQ
        for best effort traffic is absolutely pointless:
        SFQ is superior for this purpose.

        IMPLEMENTATION:
        This implementation limits:
        - maximal queue length per flow to 127 packets;
        - max mtu to 2^18-1;
        - max 65408 flows;
        - number of hash buckets to 65536.

        It is easy to increase these values, but not while the qdisc is in flight.  */

#define SFQ_MAX_DEPTH           127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS       128
#define SFQ_MAX_FLOWS           (0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT          0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT         3
#define SFQ_ALLOT_SIZE(X)       DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
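
/* Worked example of the scaling above (illustrative numbers, not from any
 * particular setup): with quantum = 1514 bytes, SFQ_ALLOT_SIZE(1514) =
 * DIV_ROUND_UP(1514, 8) = 190 scaled units per round, and a 1000 byte packet
 * costs SFQ_ALLOT_SIZE(1000) = 125 units.  Even a 64KB GSO packet costs only
 * 8192 units, which fits comfortably in the signed 16-bit 'allot' field of
 * struct sfq_slot below.
 */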

/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We don't use pointers, to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to the slots[] array,
 * while the following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to the dep[] array.
 */
struct sfq_head {
        sfq_index       next;
        sfq_index       prev;
};

struct sfq_slot {
        struct sk_buff  *skblist_next;
        struct sk_buff  *skblist_prev;
        sfq_index       qlen;           /* number of skbs in skblist */
        sfq_index       next;           /* next slot in sfq RR chain */
        struct sfq_head dep;            /* anchor in dep[] chains */
        unsigned short  hash;           /* hash value (index in ht[]) */
        short           allot;          /* credit for this slot */

        unsigned int    backlog;
        struct red_vars vars;
};

struct sfq_sched_data {
/* frequently used fields */
        int             limit;          /* limit of total number of packets in this qdisc */
        unsigned int    divisor;        /* number of slots in hash table */
        u8              headdrop;
        u8              maxdepth;       /* limit of packets per flow */

        u32             perturbation;
        u8              cur_depth;      /* depth of longest slot */
        u8              flags;
        unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
        struct tcf_proto __rcu *filter_list;
        sfq_index       *ht;            /* Hash table ('divisor' slots) */
        struct sfq_slot *slots;         /* Flows table ('maxflows' entries) */

        struct red_parms *red_parms;
        struct tc_sfqred_stats stats;
        struct sfq_slot *tail;          /* current slot in round */

        struct sfq_head dep[SFQ_MAX_DEPTH + 1];
                                        /* Linked lists of slots, indexed by depth
                                         * dep[0] : list of unused flows
                                         * dep[1] : list of flows with 1 packet
                                         * dep[X] : list of flows with X packets
                                         */

        unsigned int    maxflows;       /* number of flows in flows array */
        int             perturb_period;
        unsigned int    quantum;        /* Allotment per round: MUST BE >= MTU */
        struct timer_list perturb_timer;
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
        if (val < SFQ_MAX_FLOWS)
                return &q->slots[val].dep;
        return &q->dep[val - SFQ_MAX_FLOWS];
}
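
/* Worked example of the sfq_index encoding (values follow from the constants
 * above): with SFQ_MAX_FLOWS = 65408, an index of 3 is a 'pointer' to
 * slots[3], while an index of 65408 + 2 = 65410 is a 'pointer' to dep[2],
 * the anchor of the list of flows currently holding exactly two packets.
 */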

static unsigned int sfq_hash(const struct sfq_sched_data *q,
                             const struct sk_buff *skb)
{
        return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
}
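
/* Note: the mask above relies on q->divisor being a power of two, which
 * sfq_change() enforces, so the perturbed flow hash is reduced to a bucket
 * index in [0, divisor - 1] (0..1023 with the default divisor of 1024).
 */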

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                                 int *qerr)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        struct tcf_result res;
        struct tcf_proto *fl;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->divisor)
                return TC_H_MIN(skb->priority);

        fl = rcu_dereference_bh(q->filter_list);
        if (!fl)
                return sfq_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, fl, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->divisor)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}
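
/* The value returned by sfq_classify() is 1-based: 0 means "drop" (with *qerr
 * describing why), while 1..divisor selects a hash bucket.  sfq_enqueue()
 * therefore decrements the result by one before indexing q->ht[].
 */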

/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
        sfq_index p, n;
        struct sfq_slot *slot = &q->slots[x];
        int qlen = slot->qlen;

        p = qlen + SFQ_MAX_FLOWS;
        n = q->dep[qlen].next;

        slot->dep.next = n;
        slot->dep.prev = p;

        q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
        sfq_dep_head(q, n)->prev = x;
}
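
/* Example: a slot x whose queue has grown to 3 packets is linked by
 * sfq_link() at the head of the dep[3] chain, with its dep.prev set to the
 * 'pointer' value 3 + SFQ_MAX_FLOWS (the dep[3] anchor itself) and its
 * dep.next set to whatever slot previously headed that chain.
 */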

#define sfq_unlink(q, x, n, p)                  \
        do {                                    \
                n = q->slots[x].dep.next;       \
                p = q->slots[x].dep.prev;       \
                sfq_dep_head(q, p)->next = n;   \
                sfq_dep_head(q, n)->prev = p;   \
        } while (0)


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
        sfq_index p, n;
        int d;

        sfq_unlink(q, x, n, p);

        d = q->slots[x].qlen--;
        if (n == p && q->cur_depth == d)
                q->cur_depth--;
        sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
        sfq_index p, n;
        int d;

        sfq_unlink(q, x, n, p);

        d = ++q->slots[x].qlen;
        if (q->cur_depth < d)
                q->cur_depth = d;
        sfq_link(q, x);
}

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
        struct sk_buff *skb = slot->skblist_prev;

        slot->skblist_prev = skb->prev;
        skb->prev->next = (struct sk_buff *)slot;
        skb->next = skb->prev = NULL;
        return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
        struct sk_buff *skb = slot->skblist_next;

        slot->skblist_next = skb->next;
        skb->next->prev = (struct sk_buff *)slot;
        skb->next = skb->prev = NULL;
        return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
        memset(slot, 0, sizeof(*slot));
        slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
        skb->prev = slot->skblist_prev;
        skb->next = (struct sk_buff *)slot;
        slot->skblist_prev->next = skb;
        slot->skblist_prev = skb;
}
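
/* The casts to (struct sk_buff *) above rely on skblist_next/skblist_prev
 * being the first two fields of struct sfq_slot, laid out like the next/prev
 * pointers at the start of struct sk_buff; the slot itself then acts as the
 * head of a circular skb list, so an empty slot points back to itself (see
 * slot_queue_init()).
 */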

static unsigned int sfq_drop(struct Qdisc *sch)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        sfq_index x, d = q->cur_depth;
        struct sk_buff *skb;
        unsigned int len;
        struct sfq_slot *slot;

        /* Queue is full! Find the longest slot and drop tail packet from it */
        if (d > 1) {
                x = q->dep[d].next;
                slot = &q->slots[x];
drop:
                skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
                len = qdisc_pkt_len(skb);
                slot->backlog -= len;
                sfq_dec(q, x);
                sch->q.qlen--;
                qdisc_qstats_drop(sch);
                qdisc_qstats_backlog_dec(sch, skb);
                kfree_skb(skb);
                return len;
        }

        if (d == 1) {
                /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
                x = q->tail->next;
                slot = &q->slots[x];
                q->tail->next = slot->next;
                q->ht[slot->hash] = SFQ_EMPTY_SLOT;
                goto drop;
        }

        return 0;
}
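
/* Drop policy sketch: when the qdisc is over limit, sfq_drop() picks the
 * longest flow (q->dep[d].next is the head of the chain of flows holding
 * cur_depth packets) and removes one packet from its tail, or from its head
 * if the 'headdrop' option is set.  Only when every active flow holds a
 * single packet does it fall back to dropping from the next flow in the
 * round-robin ring.
 */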

/* Is ECN parameter configured */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
        return q->flags & TC_RED_ECN;
}

/* Should packets over max threshold just be marked */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
        return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

static int sfq_headdrop(const struct sfq_sched_data *q)
{
        return q->headdrop;
}
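
/* Flag semantics used by the RED/ECN variant (as encoded by the two helpers
 * above): TC_RED_ECN alone means packets may be CE-marked instead of dropped
 * even at the hard-mark threshold, while TC_RED_ECN | TC_RED_HARDDROP marks
 * only in the probabilistic region and still drops at the hard threshold.
 */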

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        unsigned int hash;
        sfq_index x, qlen;
        struct sfq_slot *slot;
        int uninitialized_var(ret);
        struct sk_buff *head;
        int delta;

        hash = sfq_classify(skb, sch, &ret);
        if (hash == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
        }
        hash--;

        x = q->ht[hash];
        slot = &q->slots[x];
        if (x == SFQ_EMPTY_SLOT) {
                x = q->dep[0].next; /* get a free slot */
                if (x >= SFQ_MAX_FLOWS)
                        return qdisc_drop(skb, sch);
                q->ht[hash] = x;
                slot = &q->slots[x];
                slot->hash = hash;
                slot->backlog = 0; /* should already be 0 anyway... */
                red_set_vars(&slot->vars);
                goto enqueue;
        }
        if (q->red_parms) {
                slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
                                                             &slot->vars,
                                                             slot->backlog);
                switch (red_action(q->red_parms,
                                   &slot->vars,
                                   slot->vars.qavg)) {
                case RED_DONT_MARK:
                        break;

                case RED_PROB_MARK:
                        qdisc_qstats_overlimit(sch);
                        if (sfq_prob_mark(q)) {
                                /* We know we have at least one packet in queue */
                                if (sfq_headdrop(q) &&
                                    INET_ECN_set_ce(slot->skblist_next)) {
                                        q->stats.prob_mark_head++;
                                        break;
                                }
                                if (INET_ECN_set_ce(skb)) {
                                        q->stats.prob_mark++;
                                        break;
                                }
                        }
                        q->stats.prob_drop++;
                        goto congestion_drop;

                case RED_HARD_MARK:
                        qdisc_qstats_overlimit(sch);
                        if (sfq_hard_mark(q)) {
                                /* We know we have at least one packet in queue */
                                if (sfq_headdrop(q) &&
                                    INET_ECN_set_ce(slot->skblist_next)) {
                                        q->stats.forced_mark_head++;
                                        break;
                                }
                                if (INET_ECN_set_ce(skb)) {
                                        q->stats.forced_mark++;
                                        break;
                                }
                        }
                        q->stats.forced_drop++;
                        goto congestion_drop;
                }
        }

        if (slot->qlen >= q->maxdepth) {
congestion_drop:
                if (!sfq_headdrop(q))
                        return qdisc_drop(skb, sch);

                /* We know we have at least one packet in queue */
                head = slot_dequeue_head(slot);
                delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
                sch->qstats.backlog -= delta;
                slot->backlog -= delta;
                qdisc_drop(head, sch);

                slot_queue_add(slot, skb);
                return NET_XMIT_CN;
        }

enqueue:
        qdisc_qstats_backlog_inc(sch, skb);
        slot->backlog += qdisc_pkt_len(skb);
        slot_queue_add(slot, skb);
        sfq_inc(q, x);
        if (slot->qlen == 1) {          /* The flow is new */
                if (q->tail == NULL) {  /* It is the first flow */
                        slot->next = x;
                } else {
                        slot->next = q->tail->next;
                        q->tail->next = x;
                }
                /* We put this flow at the end of our flow list.
                 * This might sound unfair for a new flow to wait after old ones,
                 * but we could end up servicing new flows only, and freezing old ones.
                 */
                q->tail = slot;
                /* We could use a bigger initial quantum for new flows */
                slot->allot = q->scaled_quantum;
        }
        if (++sch->q.qlen <= q->limit)
                return NET_XMIT_SUCCESS;

        qlen = slot->qlen;
        sfq_drop(sch);
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
         */
        if (qlen != slot->qlen)
                return NET_XMIT_CN;

        /* As we dropped a packet, better let upper stack know this */
        qdisc_tree_decrease_qlen(sch, 1);
        return NET_XMIT_SUCCESS;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        sfq_index a, next_a;
        struct sfq_slot *slot;

        /* No active slots */
        if (q->tail == NULL)
                return NULL;

next_slot:
        a = q->tail->next;
        slot = &q->slots[a];
        if (slot->allot <= 0) {
                q->tail = slot;
                slot->allot += q->scaled_quantum;
                goto next_slot;
        }
        skb = slot_dequeue_head(slot);
        sfq_dec(q, a);
        qdisc_bstats_update(sch, skb);
        sch->q.qlen--;
        qdisc_qstats_backlog_dec(sch, skb);
        slot->backlog -= qdisc_pkt_len(skb);
        /* Is the slot empty? */
        if (slot->qlen == 0) {
                q->ht[slot->hash] = SFQ_EMPTY_SLOT;
                next_a = slot->next;
                if (a == next_a) {
                        q->tail = NULL; /* no more active slots */
                        return skb;
                }
                q->tail->next = next_a;
        } else {
                slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
        }
        return skb;
}
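
/* Deficit round robin sketch (illustrative numbers): with quantum = 1514 the
 * scaled quantum is 190.  A slot dequeues packets while its allot stays
 * positive, paying SFQ_ALLOT_SIZE(len) per packet (125 for a 1000 byte
 * packet); once allot drops to zero or below, the slot becomes the new tail,
 * gets refilled with scaled_quantum, and the next flow in the ring is served.
 */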

static void
sfq_reset(struct Qdisc *sch)
{
        struct sk_buff *skb;

        while ((skb = sfq_dequeue(sch)) != NULL)
                kfree_skb(skb);
}

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        int i;
        struct sfq_slot *slot;
        struct sk_buff_head list;
        int dropped = 0;

        __skb_queue_head_init(&list);

        for (i = 0; i < q->maxflows; i++) {
                slot = &q->slots[i];
                if (!slot->qlen)
                        continue;
                while (slot->qlen) {
                        skb = slot_dequeue_head(slot);
                        sfq_dec(q, i);
                        __skb_queue_tail(&list, skb);
                }
                slot->backlog = 0;
                red_set_vars(&slot->vars);
                q->ht[slot->hash] = SFQ_EMPTY_SLOT;
        }
        q->tail = NULL;

        while ((skb = __skb_dequeue(&list)) != NULL) {
                unsigned int hash = sfq_hash(q, skb);
                sfq_index x = q->ht[hash];

                slot = &q->slots[x];
                if (x == SFQ_EMPTY_SLOT) {
                        x = q->dep[0].next; /* get a free slot */
                        if (x >= SFQ_MAX_FLOWS) {
drop:
                                qdisc_qstats_backlog_dec(sch, skb);
                                kfree_skb(skb);
                                dropped++;
                                continue;
                        }
                        q->ht[hash] = x;
                        slot = &q->slots[x];
                        slot->hash = hash;
                }
                if (slot->qlen >= q->maxdepth)
                        goto drop;
                slot_queue_add(slot, skb);
                if (q->red_parms)
                        slot->vars.qavg = red_calc_qavg(q->red_parms,
                                                        &slot->vars,
                                                        slot->backlog);
                slot->backlog += qdisc_pkt_len(skb);
                sfq_inc(q, x);
                if (slot->qlen == 1) {          /* The flow is new */
                        if (q->tail == NULL) {  /* It is the first flow */
                                slot->next = x;
                        } else {
                                slot->next = q->tail->next;
                                q->tail->next = x;
                        }
                        q->tail = slot;
                        slot->allot = q->scaled_quantum;
                }
        }
        sch->q.qlen -= dropped;
        qdisc_tree_decrease_qlen(sch, dropped);
}

static void sfq_perturbation(unsigned long arg)
{
        struct Qdisc *sch = (struct Qdisc *)arg;
        struct sfq_sched_data *q = qdisc_priv(sch);
        spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

        spin_lock(root_lock);
        q->perturbation = prandom_u32();
        if (!q->filter_list && q->tail)
                sfq_rehash(sch);
        spin_unlock(root_lock);

        if (q->perturb_period)
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        struct tc_sfq_qopt *ctl = nla_data(opt);
        struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
        unsigned int qlen;
        struct red_parms *p = NULL;

        if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
                return -EINVAL;
        if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
                ctl_v1 = nla_data(opt);
        if (ctl->divisor &&
            (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
                return -EINVAL;
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (!p)
                        return -ENOMEM;
        }
        sch_tree_lock(sch);
        if (ctl->quantum) {
                q->quantum = ctl->quantum;
                q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
        }
        q->perturb_period = ctl->perturb_period * HZ;
        if (ctl->flows)
                q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
        if (ctl->divisor) {
                q->divisor = ctl->divisor;
                q->maxflows = min_t(u32, q->maxflows, q->divisor);
        }
        if (ctl_v1) {
                if (ctl_v1->depth)
                        q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
                if (p) {
                        swap(q->red_parms, p);
                        red_set_parms(q->red_parms,
                                      ctl_v1->qth_min, ctl_v1->qth_max,
                                      ctl_v1->Wlog,
                                      ctl_v1->Plog, ctl_v1->Scell_log,
                                      NULL,
                                      ctl_v1->max_P);
                }
                q->flags = ctl_v1->flags;
                q->headdrop = ctl_v1->headdrop;
        }
        if (ctl->limit) {
                q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
                q->maxflows = min_t(u32, q->maxflows, q->limit);
        }

        qlen = sch->q.qlen;
        while (sch->q.qlen > q->limit)
                sfq_drop(sch);
        qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

        del_timer(&q->perturb_timer);
        if (q->perturb_period) {
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
                q->perturbation = prandom_u32();
        }
        sch_tree_unlock(sch);
        kfree(p);
        return 0;
}
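
/* Userspace configuration reaches sfq_change() via netlink from tc.  A typical
 * invocation (illustrative; exact option names per the iproute2 tc-sfq manual)
 * looks like:
 *
 *     tc qdisc add dev eth0 root sfq perturb 10 quantum 1514 limit 64
 *
 * which sets perturb_period to 10 seconds, the per-round quantum to 1514
 * bytes and the overall packet limit to 64.
 */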

static void *sfq_alloc(size_t sz)
{
        void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);

        if (!ptr)
                ptr = vmalloc(sz);
        return ptr;
}

static void sfq_free(void *addr)
{
        kvfree(addr);
}
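
/* Sizing note: these tables can get large enough that kmalloc() may fail or
 * be undesirable, hence the vmalloc() fallback above.  For example, a maximal
 * divisor of 65536 needs 65536 * sizeof(sfq_index) = 128KB for q->ht, and the
 * flows table is maxflows * sizeof(struct sfq_slot).
 */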

static void sfq_destroy(struct Qdisc *sch)
{
        struct sfq_sched_data *q = qdisc_priv(sch);

        tcf_destroy_chain(&q->filter_list);
        q->perturb_period = 0;
        del_timer_sync(&q->perturb_timer);
        sfq_free(q->ht);
        sfq_free(q->slots);
        kfree(q->red_parms);
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        int i;

        q->perturb_timer.function = sfq_perturbation;
        q->perturb_timer.data = (unsigned long)sch;
        init_timer_deferrable(&q->perturb_timer);

        for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
                q->dep[i].next = i + SFQ_MAX_FLOWS;
                q->dep[i].prev = i + SFQ_MAX_FLOWS;
        }

        q->limit = SFQ_MAX_DEPTH;
        q->maxdepth = SFQ_MAX_DEPTH;
        q->cur_depth = 0;
        q->tail = NULL;
        q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
        q->maxflows = SFQ_DEFAULT_FLOWS;
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
        q->perturb_period = 0;
        q->perturbation = prandom_u32();

        if (opt) {
                int err = sfq_change(sch, opt);
                if (err)
                        return err;
        }

        q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
        q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
        if (!q->ht || !q->slots) {
                sfq_destroy(sch);
                return -ENOMEM;
        }
        for (i = 0; i < q->divisor; i++)
                q->ht[i] = SFQ_EMPTY_SLOT;

        for (i = 0; i < q->maxflows; i++) {
                slot_queue_init(&q->slots[i]);
                sfq_link(q, i);
        }
        if (q->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_sfq_qopt_v1 opt;
        struct red_parms *p = q->red_parms;

        memset(&opt, 0, sizeof(opt));
        opt.v0.quantum        = q->quantum;
        opt.v0.perturb_period = q->perturb_period / HZ;
        opt.v0.limit          = q->limit;
        opt.v0.divisor        = q->divisor;
        opt.v0.flows          = q->maxflows;
        opt.depth             = q->maxdepth;
        opt.headdrop          = q->headdrop;

        if (p) {
                opt.qth_min   = p->qth_min >> p->Wlog;
                opt.qth_max   = p->qth_max >> p->Wlog;
                opt.Wlog      = p->Wlog;
                opt.Plog      = p->Plog;
                opt.Scell_log = p->Scell_log;
                opt.max_P     = p->max_P;
        }
        memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
        opt.flags = q->flags;

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
                              u32 classid)
{
        /* we cannot bypass queue discipline anymore */
        sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch,
                                             unsigned long cl)
{
        struct sfq_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                struct gnet_dump *d)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        sfq_index idx = q->ht[cl - 1];
        struct gnet_stats_queue qs = { 0 };
        struct tc_sfq_xstats xstats = { 0 };

        if (idx != SFQ_EMPTY_SLOT) {
                const struct sfq_slot *slot = &q->slots[idx];

                xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
                qs.qlen = slot->qlen;
                qs.backlog = slot->backlog;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->divisor; i++) {
                if (q->ht[i] == SFQ_EMPTY_SLOT ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops sfq_class_ops = {
        .leaf           =       sfq_leaf,
        .get            =       sfq_get,
        .put            =       sfq_put,
        .tcf_chain      =       sfq_find_tcf,
        .bind_tcf       =       sfq_bind,
        .unbind_tcf     =       sfq_put,
        .dump           =       sfq_dump_class,
        .dump_stats     =       sfq_dump_class_stats,
        .walk           =       sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
        .cl_ops         =       &sfq_class_ops,
        .id             =       "sfq",
        .priv_size      =       sizeof(struct sfq_sched_data),
        .enqueue        =       sfq_enqueue,
        .dequeue        =       sfq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .drop           =       sfq_drop,
        .init           =       sfq_init,
        .reset          =       sfq_reset,
        .destroy        =       sfq_destroy,
        .change         =       NULL,
        .dump           =       sfq_dump,
        .owner          =       THIS_MODULE,
};

static int __init sfq_module_init(void)
{
        return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
        unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");