/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Meant to be mostly used for locally generated traffic:
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as fallback, with a 32 bits wide hash.
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also linked into one Round Robin list ('new' or 'old' flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect rate limitation.
 *
 *  enqueue():
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add skb to the per flow list of skbs (fifo).
 *   - Use a special fifo for high prio packets
 *
 *  dequeue(): serves flows in Round Robin.
 *  Note: When a flow becomes empty, we do not immediately remove it from
 *  the RB trees, for performance reasons (it is expected to send additional
 *  packets, or the SLAB cache will reuse the socket for another flow).
 */
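
/* Example setup (illustrative only; exact option names depend on the
 * iproute2 version in use):
 *
 *	tc qdisc replace dev eth0 root fq
 *	tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *		maxrate 1gbit buckets 1024 refill_delay 40ms
 *
 * The second form overrides the defaults set in fq_init() below.
 */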

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* jiffies when flow was emptied, for gc */
	};
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	struct sock	*sk;
	int		qlen;		/* number of packets in flow queue */
	int		credit;
	u32		socket_hash;	/* sk_hash */
	struct fq_flow *next;		/* next pointer in RR lists, or &detached */

	struct rb_node  rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_max_rate;	/* optional max rate per flow */
	u32		flow_plimit;	/* max packets per flow */
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;

	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_tcp_retrans;
	u64		stat_throttled;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
	struct qdisc_watchdog watchdog;
};

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
	f->next = &detached;
	f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return f->next == &detached;
}

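/* Insert a flow into the 'delayed' rb-tree, ordered by time_next_packet,
 * and remember the earliest deadline so fq_dequeue() can arm the watchdog.
 */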
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = container_of(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

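/* Opportunistic garbage collection: walk the search path for @sk in @root
 * and free up to FQ_GC_MAX detached flows that have been idle for more than
 * FQ_GC_AGE jiffies.
 */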
static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;
	int fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = container_of(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
	while (fcnt) {
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
	}
}

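/* Map an skb to its flow: TC_PRIO_CONTROL packets go to the internal queue,
 * packets owned by a local socket hash on the socket pointer, and unowned
 * packets (router workload) fall back to skb_get_hash() with the low bit
 * set so they cannot collide with a socket pointer.
 */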
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	if (unlikely(!sk)) {
		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)(skb_get_hash(skb) | 1L);
	}

	root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = container_of(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}


/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
	struct sk_buff *skb = flow->head;

	if (skb) {
		flow->head = skb->next;
		skb->next = NULL;
		flow->qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}

/* We might add in the future detection of retransmits
 * For the time being, just return false
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
	return false;
}

/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct sk_buff *prev, *head = flow->head;

	skb->next = NULL;
	if (!head) {
		flow->head = skb;
		flow->tail = skb;
		return;
	}
	if (likely(!skb_is_retransmit(skb))) {
		flow->tail->next = skb;
		flow->tail = skb;
		return;
	}

	/* This skb is a tcp retransmit,
	 * find the last retrans packet in the queue
	 */
	prev = NULL;
	while (skb_is_retransmit(head)) {
		prev = head;
		head = head->next;
		if (!head)
			break;
	}
	if (!prev) {	/* no rtx packet in queue, become the new head */
		skb->next = flow->head;
		flow->head = skb;
	} else {
		if (prev == flow->tail)
			flow->tail = skb;
		else
			skb->next = prev->next;
		prev->next = skb;
	}
}

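/* qdisc enqueue: classify the packet, enforce the global and per-flow
 * packet limits, and re-attach a detached flow to the 'new flows' list,
 * refilling its credit if it has been idle longer than flow_refill_delay.
 */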
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

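/* Move flows whose pacing deadline has passed from the 'delayed' rb-tree
 * back to the old flows list, and track the next earliest deadline.
 */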
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = container_of(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		rb_erase(p, &q->delayed);
		q->throttled_flows--;
		fq_flow_add_tail(&q->old_flows, f);
	}
}

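/* DRR dequeue: serve new flows first, then old flows, giving each flow one
 * quantum of credit per round.  A flow whose next packet is not yet due is
 * parked in the 'delayed' tree instead of being served.  When pacing is
 * enabled, the gap added after a packet is roughly plen * NSEC_PER_SEC / rate;
 * for example plen = 3000 bytes at a pacing rate of 25 MByte/s gives a
 * 120 us gap (the delay is clamped to 1 second).
 */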
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	u32 rate;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow,
							   false);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = f->head;
	if (unlikely(skb && now < f->time_next_packet &&
		     !skb_is_tcp_pure_ack(skb))) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);
		goto begin;
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->credit -= qdisc_pkt_len(skb);

	if (f->credit > 0 || !q->rate_enable)
		goto out;

	/* Do not pace locally generated ack packets */
	if (skb_is_tcp_pure_ack(skb))
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate != ~0U) {
		u32 plen = max(qdisc_pkt_len(skb), q->quantum);
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			do_div(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}

		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct sk_buff *skb;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
		kfree_skb(skb);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = container_of(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			while ((skb = fq_dequeue_head(sch, f)) != NULL)
				kfree_skb(skb);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->flows		= 0;
	q->inactive_flows	= 0;
	q->throttled_flows	= 0;
}

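/* Move all flows from the old hash array to the new one after a resize.
 * Detached flows that are old enough for garbage collection are freed
 * instead of being re-inserted.
 */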
static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = container_of(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = container_of(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

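/* The hash array can be large (up to 256K buckets), so try kmalloc_node()
 * first and quietly fall back to vmalloc_node() if that fails.
 */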
static void *fq_alloc_node(size_t sz, int node)
{
	void *ptr;

	ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
	if (!ptr)
		ptr = vmalloc_node(sz, node);
	return ptr;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was set up, we can allocate memory on the right NUMA node */
	array = fq_alloc_node(sizeof(struct rb_root) << log,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
};

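/* Netlink 'change' handler: parse TCA_FQ_* attributes under the qdisc tree
 * lock, resize the hash table if needed (dropping the lock around the
 * allocation), then drop packets in excess of a lowered limit.
 */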
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM])
		q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		kfree_skb(skb);
		drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, drop_count);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit		= 10000;
	q->flow_plimit		= 100;
	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay	= msecs_to_jiffies(40);
	q->flow_max_rate	= ~0U;
	q->rate_enable		= 1;
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->fq_root		= NULL;
	q->fq_trees_log		= ilog2(1024);
	qdisc_watchdog_init(&q->watchdog, sch);

	if (opt)
		err = fq_change(sch, opt);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct tc_fq_qd_stats st = {
		.gc_flows		= q->stat_gc_flows,
		.highprio_packets	= q->stat_internal_packets,
		.tcp_retrans		= q->stat_tcp_retrans,
		.throttled		= q->stat_throttled,
		.flows_plimit		= q->stat_flows_plimit,
		.pkts_too_long		= q->stat_pkts_too_long,
		.allocation_errors	= q->stat_allocation_errors,
		.flows			= q->flows,
		.inactive_flows		= q->inactive_flows,
		.throttled_flows	= q->throttled_flows,
		.time_next_delayed_flow	= q->time_next_delayed_flow - now,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");