/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* maximum number of hardware queues we support. */
#define TC_80211_MAX_QUEUES 16

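
/*
 * Map an 802.1d user priority (0-7) to one of the four WMM access
 * category queues.  Queue index 0 is the highest-priority queue here
 * (AC_VO, also used for management frames below) and index 3 the lowest
 * (AC_BK); e.g. priority 6 (voice) maps to queue 0, priority 1
 * (background) to queue 3.
 */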
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

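
/*
 * Per-qdisc private data: one child qdisc and one requeue list per
 * hardware queue, a bitmap (qdisc_pool) of the hardware queues this
 * qdisc may currently use (the legacy AC queues reserved at init time
 * plus any queues handed out to aggregation sessions), and an optional
 * user-configured tc filter chain.
 */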
struct ieee80211_sched_data
{
	unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)];
	struct tcf_proto *filter_list;
	struct Qdisc *queues[TC_80211_MAX_QUEUES];
	struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
};

static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* given a data frame determine the 802.1p/1d tag to use */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}
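
/*
 * Note that classify_1d() only honours the IP precedence bits (the top
 * three bits of the DSCP field), and only when the remaining DSCP bits
 * are zero: a TOS byte of 0xa0 (CS5) yields priority 5, while 0xb8 (EF)
 * falls back to best effort (priority 0).
 */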


static inline int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 2; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}
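
/*
 * wme_downgrade_ac() steps a frame down one access category at a time;
 * classify80211() below keeps calling it while the ACM (admission
 * control mandatory) bit for the frame's current category is set in
 * local->wmm_acm, and drops the frame once no lower category is left.
 */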


/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	int qos;

	/* see if frame is data or non data frame */
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return IEEE80211_TX_QUEUE_DATA0;
	}

	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	/* is this a QoS frame? */
	qos = fc & IEEE80211_STYPE_QOS_DATA;

	if (!qos) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client verify acm is not set for this ac */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}

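
/*
 * Enqueue a frame on the root qdisc.  Frames the driver flagged with
 * IEEE80211_TXPD_REQUEUE go straight onto the per-queue requeued list.
 * Everything else is classified, and if an aggregation (A-MPDU) session
 * owns a hardware queue for this station/TID (sta->tid_to_tx_q, backed
 * by a bit in qdisc_pool), the frame is steered to that queue instead
 * of the default access category queue.
 */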
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	struct Qdisc *qdisc;
	int err, queue;
	struct sta_info *sta;
	u8 tid;

	if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
		queue = pkt_data->queue;
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < local->hw.queues) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				pkt_data->flags |= IEEE80211_TXPD_AMPDU;
			} else {
				pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	/* now we know the 1d priority, fill in the QoS header if there
	 * is one */
	if (WLAN_FC_IS_QOS_DATA(fc)) {
		u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
		u8 ack_policy = 0;
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p = ack_policy | tid;
		p++;
		*p = 0;

		rcu_read_lock();

		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < local->hw.queues) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				pkt_data->flags |= IEEE80211_TXPD_AMPDU;
			} else {
				pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
			}
		}

		rcu_read_unlock();
	}

	if (unlikely(queue >= local->hw.queues)) {
#if 0
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s - queue=%d (hw does not "
				"support) -> %d\n",
				__func__, queue, local->hw.queues - 1);
		}
#endif
		queue = local->hw.queues - 1;
	}

	if (unlikely(queue < 0)) {
		kfree_skb(skb);
		err = NET_XMIT_DROP;
	} else {
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		pkt_data->queue = (unsigned int) queue;
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}


/* TODO: clean up the cases where master_hard_start_xmit
 * returns non 0 - it shouldn't ever do that. Once done we
 * can remove this function */
static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct Qdisc *qdisc;
	int err;

	/* we recorded which queue to use earlier! */
	qdisc = q->queues[pkt_data->queue];

	if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
		qd->q.qlen++;
		return 0;
	}
	qd->qstats.drops++;
	return err;
}

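
/*
 * Dequeue in strict priority order: walk the hardware queues from the
 * highest-priority index (0) upwards, skip any queue that is stopped
 * (XOFF), pending, or whose bit is clear in qdisc_pool, and within a
 * queue serve requeued frames before the child qdisc.
 */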
static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < hw->queues; queue++) {
		/* see if there is room in this hardware queue */
		if ((test_bit(IEEE80211_LINK_STATE_XOFF,
				&local->state[queue])) ||
		    (test_bit(IEEE80211_LINK_STATE_PENDING,
				&local->state[queue])) ||
		    (!test_bit(queue, q->qdisc_pool)))
			continue;

		/* there is space - try and get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning a NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}

static void wme_qdiscop_reset(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	/* QUESTION: should we have some hardware flush functionality here? */

	for (queue = 0; queue < hw->queues; queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_reset(q->queues[queue]);
	}
	qd->q.qlen = 0;
}

static void wme_qdiscop_destroy(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	tcf_destroy_chain(&q->filter_list);

	for (queue = 0; queue < hw->queues; queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_destroy(q->queues[queue]);
		q->queues[queue] = &noop_qdisc;
	}
}

/* called whenever parameters are updated on existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
/*	struct ieee80211_sched_data *q = qdisc_priv(qd);
*/
	/* check our options block is the right size */
	/* copy any options to our local structure */
/*	Ignore options block for now - always use static mapping
	struct tc_ieee80211_qopt *qopt = nla_data(opt);

	if (opt->nla_len < nla_attr_size(sizeof(*qopt)))
		return -EINVAL;
	memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue));
*/
	return 0;
}

/* called during initial creation of qdisc on device */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local;
	int queues;
	int err = 0, i;

	/* check that device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	/* check this device is an ieee80211 master type device */
	if (dev->type != ARPHRD_IEEE80211)
		return -EINVAL;

	/* check that there is no qdisc currently attached to device
	 * this ensures that we will be the root qdisc. (I can't find a better
	 * way to test this explicitly) */
	if (dev->qdisc_sleeping != &noop_qdisc)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	queues = local->hw.queues;

	/* if options were passed in, set them */
	if (opt) {
		err = wme_qdiscop_tune(qd, opt);
	}

	/* create child queues */
	for (i = 0; i < queues; i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	/* reserve all legacy QoS queues */
	for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++)
		set_bit(i, q->qdisc_pool);

	return err;
}

static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
/*	struct ieee80211_sched_data *q = qdisc_priv(qd);
	unsigned char *p = skb->tail;
	struct tc_ieee80211_qopt opt;

	memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
*/	return skb->len;
/*
nla_put_failure:
	skb_trim(skb, p - skb->data);*/
	return -1;
}

static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
			     struct Qdisc *new, struct Qdisc **old)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= hw->queues)
		return -EINVAL;

	if (!new)
		new = &noop_qdisc;

	sch_tree_lock(qd);
	*old = q->queues[queue];
	q->queues[queue] = new;
	qdisc_reset(*old);
	sch_tree_unlock(qd);

	return 0;
}


static struct Qdisc *
wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= hw->queues)
		return NULL;

	return q->queues[queue];
}

static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = TC_H_MIN(classid);

	if (queue - 1 >= hw->queues)
		return 0;

	return queue;
}


static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}


static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}

static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
			      struct nlattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > hw->queues)
		return -ENOENT;

	/* TODO: put code to program hardware queue parameters here,
	 * to allow programming from tc command line */

	return 0;
}


/* we don't support deleting hardware queues;
 * when we add WMM-SA support, TSPECs may be deleted here */
static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > hw->queues)
		return -ENOENT;
	return 0;
}


static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
				  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > hw->queues)
		return -ENOENT;
	tcm->tcm_handle = TC_H_MIN(cl);
	tcm->tcm_parent = qd->handle;
	tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
	return 0;
}

static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	if (arg->stop)
		return;

	for (queue = 0; queue < hw->queues; queue++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* we should return classids for our internal queues here
		 * as well as the external ones */
		if (arg->fn(qd, queue+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
						unsigned long cl)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);

	if (cl)
		return NULL;

	return &q->filter_list;
}

/* this qdisc is classful (i.e. has classes, some of which may have leaf
 * qdiscs attached) - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};

/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};

void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}


int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}


int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}


void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}

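
/*
 * Reserve a free hardware queue from qdisc_pool for an A-MPDU session
 * on the given station/TID.  The new queue is stopped immediately; any
 * frames still pending for this TID must be drained from the previous
 * queue first, since HT aggregation requires in-order delivery.
 */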
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;
	struct ieee80211_sched_data *q =
			qdisc_priv(local->mdev->qdisc_sleeping);
	DECLARE_MAC_BUF(mac);

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	/* try to get a Qdisc from the pool */
	for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++)
		if (!test_and_set_bit(i, q->qdisc_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* If there are already packets pending on this TID,
			 * we first need to drain them on the previous queue,
			 * since HT aggregation requires strict ordering */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}

/**
 * the caller needs to hold local->mdev->queue_lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool */
	clear_bit(agg_queue, q->qdisc_pool);
	sta->tid_to_tx_q[tid] = local->hw.queues;

	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}

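
/*
 * Push every frame currently queued on the given software queue back
 * through wme_qdiscop_enqueue() so it is classified again and lands on
 * its normal queue, e.g. after an aggregation queue has been torn down.
 */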
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
	struct ieee80211_sched_data *q = qdisc_priv(root_qd);
	struct Qdisc *qdisc = q->queues[queue];
	struct sk_buff *skb = NULL;
	u32 len;

	if (!qdisc || !qdisc->dequeue)
		return;

	printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
	for (len = qdisc->q.qlen; len > 0; len--) {
		skb = qdisc->dequeue(qdisc);
		root_qd->q.qlen--;
		/* packet will be classified again and
		 * skb->packet_data->queue will be overridden if needed */
		if (skb)
			wme_qdiscop_enqueue(skb, root_qd);
	}
}