/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into includer */

/* Remove and return the packet at the head of @flow's queue.
 *
 * Accounting (tin backlog bytes/packets, per-flow backlog, global fq
 * backlog) is decremented to match. fq->backlogs is maintained in
 * descending order of flow->backlog; since this flow just shrank it is
 * either unlinked (backlog hit zero) or moved towards the tail until it
 * again sits in front of the first smaller flow.
 *
 * Returns NULL when the flow is empty. Caller must hold fq->lock.
 */
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
				       struct fq_flow *flow)
{
	struct fq_tin *tin = flow->tin;
	struct fq_flow *i;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

	skb = __skb_dequeue(&flow->queue);
	if (!skb)
		return NULL;

	tin->backlog_bytes -= skb->len;
	tin->backlog_packets--;
	flow->backlog -= skb->len;
	fq->backlog--;

	if (flow->backlog == 0) {
		/* Flow went idle: drop it from the backlog ranking. */
		list_del_init(&flow->backlogchain);
	} else {
		i = flow;

		/* Scan towards the tail (smaller backlogs) for the first
		 * flow this one still outranks ...
		 */
		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
			if (i->backlog < flow->backlog)
				break;

		/* ... and re-insert just in front of it (or at the list
		 * tail if every remaining flow is bigger).
		 */
		list_move_tail(&flow->backlogchain,
			       &i->backlogchain);
	}

	return skb;
}

/* Deficit round-robin dequeue for one tin (fq_codel style).
 *
 * New flows are serviced before old ones. A flow with exhausted deficit
 * is recharged with one quantum and rotated to the back of old_flows.
 * @dequeue_func pulls the actual packet and may return NULL; a flow
 * that yields nothing is either demoted from new_flows to old_flows
 * (so old flows are not starved) or detached entirely.
 *
 * Returns NULL when the tin has no packets. Caller must hold fq->lock.
 */
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
				      struct fq_tin *tin,
				      fq_tin_dequeue_t dequeue_func)
{
	struct fq_flow *flow;
	struct list_head *head;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

begin:
	head = &tin->new_flows;
	if (list_empty(head)) {
		head = &tin->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_flow, flowchain);

	if (flow->deficit <= 0) {
		/* Out of credit: top up and move to the old rotation. */
		flow->deficit += fq->quantum;
		list_move_tail(&flow->flowchain,
			       &tin->old_flows);
		goto begin;
	}

	skb = dequeue_func(fq, tin, flow);
	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &tin->new_flows) &&
		    !list_empty(&tin->old_flows)) {
			list_move_tail(&flow->flowchain, &tin->old_flows);
		} else {
			/* Flow is empty: unschedule it and release the
			 * hash bucket for reuse by other tins.
			 */
			list_del_init(&flow->flowchain);
			flow->tin = NULL;
		}
		goto begin;
	}

	flow->deficit -= skb->len;
	tin->tx_bytes += skb->len;
	tin->tx_packets++;

	return skb;
}

96static struct fq_flow *fq_flow_classify(struct fq *fq,
97 struct fq_tin *tin,
98 struct sk_buff *skb,
99 fq_flow_get_default_t get_default_func)
100{
101 struct fq_flow *flow;
102 u32 hash;
103 u32 idx;
104
105 lockdep_assert_held(&fq->lock);
106
107 hash = skb_get_hash_perturb(skb, fq->perturbation);
108 idx = reciprocal_scale(hash, fq->flows_cnt);
109 flow = &fq->flows[idx];
110
111 if (flow->tin && flow->tin != tin) {
112 flow = get_default_func(fq, tin, idx, skb);
113 tin->collisions++;
114 fq->collisions++;
115 }
116
117 if (!flow->tin)
118 tin->flows++;
119
120 return flow;
121}
122
/* Enqueue @skb on the flow it hashes to within @tin.
 *
 * The flow's accounting is charged, the flow is (re)positioned in
 * fq->backlogs -- kept sorted by descending backlog -- and, if it was
 * not already scheduled, added to the tin's new_flows list with a fresh
 * deficit. If the global packet limit is then exceeded, one packet is
 * dropped from the fattest flow (head of fq->backlogs) via @free_func.
 *
 * Caller must hold fq->lock.
 */
static void fq_tin_enqueue(struct fq *fq,
			   struct fq_tin *tin,
			   struct sk_buff *skb,
			   fq_skb_free_t free_func,
			   fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	struct fq_flow *i;

	lockdep_assert_held(&fq->lock);

	flow = fq_flow_classify(fq, tin, skb, get_default_func);

	flow->tin = tin;
	flow->backlog += skb->len;
	tin->backlog_bytes += skb->len;
	tin->backlog_packets++;
	fq->backlog++;

	/* Make sure the flow is on the backlog ranking, then bubble it
	 * towards the head (larger backlogs) ...
	 */
	if (list_empty(&flow->backlogchain))
		list_add_tail(&flow->backlogchain, &fq->backlogs);

	i = flow;
	list_for_each_entry_continue_reverse(i, &fq->backlogs,
					     backlogchain)
		if (i->backlog > flow->backlog)
			break;

	/* ... re-inserting right after the first flow that still
	 * outranks it (or at the list head if none does).
	 */
	list_move(&flow->backlogchain, &i->backlogchain);

	if (list_empty(&flow->flowchain)) {
		/* Newly active flow: schedule it with a full quantum. */
		flow->deficit = fq->quantum;
		list_add_tail(&flow->flowchain,
			      &tin->new_flows);
	}

	__skb_queue_tail(&flow->queue, skb);

	if (fq->backlog > fq->limit) {
		/* Over the global limit: drop one packet from the flow
		 * with the largest backlog (possibly the one we just
		 * enqueued to).
		 */
		flow = list_first_entry_or_null(&fq->backlogs,
						struct fq_flow,
						backlogchain);
		if (!flow)
			return;

		skb = fq_flow_dequeue(fq, flow);
		if (!skb)
			return;

		free_func(fq, flow->tin, flow, skb);

		flow->tin->overlimit++;
		fq->overlimit++;
	}
}

179static void fq_flow_reset(struct fq *fq,
180 struct fq_flow *flow,
181 fq_skb_free_t free_func)
182{
183 struct sk_buff *skb;
184
185 while ((skb = fq_flow_dequeue(fq, flow)))
186 free_func(fq, flow->tin, flow, skb);
187
188 if (!list_empty(&flow->flowchain))
189 list_del_init(&flow->flowchain);
190
191 if (!list_empty(&flow->backlogchain))
192 list_del_init(&flow->backlogchain);
193
194 flow->tin = NULL;
195
196 WARN_ON_ONCE(flow->backlog);
197}
198
199static void fq_tin_reset(struct fq *fq,
200 struct fq_tin *tin,
201 fq_skb_free_t free_func)
202{
203 struct list_head *head;
204 struct fq_flow *flow;
205
206 for (;;) {
207 head = &tin->new_flows;
208 if (list_empty(head)) {
209 head = &tin->old_flows;
210 if (list_empty(head))
211 break;
212 }
213
214 flow = list_first_entry(head, struct fq_flow, flowchain);
215 fq_flow_reset(fq, flow, free_func);
216 }
217
218 WARN_ON_ONCE(tin->backlog_bytes);
219 WARN_ON_ONCE(tin->backlog_packets);
220}
221
222static void fq_flow_init(struct fq_flow *flow)
223{
224 INIT_LIST_HEAD(&flow->flowchain);
225 INIT_LIST_HEAD(&flow->backlogchain);
226 __skb_queue_head_init(&flow->queue);
227}
228
229static void fq_tin_init(struct fq_tin *tin)
230{
231 INIT_LIST_HEAD(&tin->new_flows);
232 INIT_LIST_HEAD(&tin->old_flows);
233}
234
235static int fq_init(struct fq *fq, int flows_cnt)
236{
237 int i;
238
239 memset(fq, 0, sizeof(fq[0]));
240 INIT_LIST_HEAD(&fq->backlogs);
241 spin_lock_init(&fq->lock);
242 fq->flows_cnt = max_t(u32, flows_cnt, 1);
243 fq->perturbation = prandom_u32();
244 fq->quantum = 300;
245 fq->limit = 8192;
246
247 fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
248 if (!fq->flows)
249 return -ENOMEM;
250
251 for (i = 0; i < fq->flows_cnt; i++)
252 fq_flow_init(&fq->flows[i]);
253
254 return 0;
255}
256
257static void fq_reset(struct fq *fq,
258 fq_skb_free_t free_func)
259{
260 int i;
261
262 for (i = 0; i < fq->flows_cnt; i++)
263 fq_flow_reset(fq, &fq->flows[i], free_func);
264
265 kfree(fq->flows);
266 fq->flows = NULL;
267}
268
#endif