blob: dc1598b86004e9df03e4eefb188b17ca292171b9 [file] [log] [blame]
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
8
9#include <linux/netdevice.h>
10#include <linux/skbuff.h>
11#include <linux/module.h>
12#include <linux/if_arp.h>
13#include <linux/types.h>
14#include <net/ip.h>
15#include <net/pkt_sched.h>
16
17#include <net/mac80211.h>
18#include "ieee80211_i.h"
19#include "wme.h"
20
/* maximum number of hardware queues we support. */
#define TC_80211_MAX_QUEUES 16

/* map an 802.1d priority tag (index 0-7) to a hardware TX queue number */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
Jiri Bencf0706e82007-05-05 11:45:53 -070025
struct ieee80211_sched_data
{
	/* bit set = hw queue is in service: legacy QoS queues are set at
	 * init, aggregation queues are claimed/released at runtime */
	unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)];
	/* user-configured tc classifier chain (may be NULL) */
	struct tcf_proto *filter_list;
	/* one child qdisc per hardware queue */
	struct Qdisc *queues[TC_80211_MAX_QUEUES];
	/* per-queue list of frames the driver pushed back; these are
	 * dequeued ahead of the child qdisc */
	struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
};
33
/* LLC/SNAP header announcing an IPv4 payload (EtherType 0x0800) */
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
Jiri Bencf0706e82007-05-05 11:45:53 -070035
36/* given a data frame determine the 802.1p/1d tag to use */
37static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
38{
39 struct iphdr *ip;
40 int dscp;
41 int offset;
42
43 struct ieee80211_sched_data *q = qdisc_priv(qd);
44 struct tcf_result res = { -1, 0 };
45
46 /* if there is a user set filter list, call out to that */
47 if (q->filter_list) {
48 tc_classify(skb, q->filter_list, &res);
49 if (res.class != -1)
50 return res.class;
51 }
52
53 /* skb->priority values from 256->263 are magic values to
54 * directly indicate a specific 802.1d priority.
55 * This is used to allow 802.1d priority to be passed directly in
56 * from VLAN tags, etc. */
57 if (skb->priority >= 256 && skb->priority <= 263)
58 return skb->priority - 256;
59
60 /* check there is a valid IP header present */
Guy Cohena8bdf292008-01-09 19:12:48 +020061 offset = ieee80211_get_hdrlen_from_skb(skb);
62 if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
63 memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
Jiri Bencf0706e82007-05-05 11:45:53 -070064 return 0;
65
Guy Cohena8bdf292008-01-09 19:12:48 +020066 ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));
Jiri Bencf0706e82007-05-05 11:45:53 -070067
68 dscp = ip->tos & 0xfc;
69 if (dscp & 0x1c)
70 return 0;
71 return dscp >> 5;
72}
73
74
75static inline int wme_downgrade_ac(struct sk_buff *skb)
76{
77 switch (skb->priority) {
78 case 6:
79 case 7:
80 skb->priority = 5; /* VO -> VI */
81 return 0;
82 case 4:
83 case 5:
84 skb->priority = 3; /* VI -> BE */
85 return 0;
86 case 0:
87 case 3:
88 skb->priority = 2; /* BE -> BK */
89 return 0;
90 default:
91 return -1;
92 }
93}
94
95
/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	int qos;

	/* see if frame is data or non data frame */
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return IEEE80211_TX_QUEUE_DATA0;
	}

	/* placeholder (deliberately dead): radiotap-injected frames
	 * should eventually carry their own AC */
	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	/* is this a QoS frame? */
	qos = fc & IEEE80211_STYPE_QOS_DATA;

	if (!qos) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client verify acm is not set for this ac */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}
139
140
141static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
142{
143 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
144 struct ieee80211_sched_data *q = qdisc_priv(qd);
145 struct ieee80211_tx_packet_data *pkt_data =
146 (struct ieee80211_tx_packet_data *) skb->cb;
147 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
148 unsigned short fc = le16_to_cpu(hdr->frame_control);
149 struct Qdisc *qdisc;
150 int err, queue;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200151 struct sta_info *sta;
152 u8 tid;
Jiri Bencf0706e82007-05-05 11:45:53 -0700153
Jiri Slabye8bf9642007-08-28 17:01:54 -0400154 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200155 queue = pkt_data->queue;
Johannes Bergd0709a62008-02-25 16:27:46 +0100156 rcu_read_lock();
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200157 sta = sta_info_get(local, hdr->addr1);
158 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
159 if (sta) {
160 int ampdu_queue = sta->tid_to_tx_q[tid];
161 if ((ampdu_queue < local->hw.queues) &&
Ron Rindjunskya9af2012008-01-30 12:58:45 +0200162 test_bit(ampdu_queue, q->qdisc_pool)) {
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200163 queue = ampdu_queue;
164 pkt_data->flags |= IEEE80211_TXPD_AMPDU;
165 } else {
166 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
167 }
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200168 }
Johannes Bergd0709a62008-02-25 16:27:46 +0100169 rcu_read_unlock();
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200170 skb_queue_tail(&q->requeued[queue], skb);
Jiri Bencf0706e82007-05-05 11:45:53 -0700171 qd->q.qlen++;
172 return 0;
173 }
174
175 queue = classify80211(skb, qd);
176
177 /* now we know the 1d priority, fill in the QoS header if there is one
178 */
179 if (WLAN_FC_IS_QOS_DATA(fc)) {
180 u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200181 u8 ack_policy = 0;
182 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
Jiri Bencf0706e82007-05-05 11:45:53 -0700183 if (local->wifi_wme_noack_test)
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200184 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
Jiri Bencf0706e82007-05-05 11:45:53 -0700185 QOS_CONTROL_ACK_POLICY_SHIFT;
186 /* qos header is 2 bytes, second reserved */
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200187 *p = ack_policy | tid;
Jiri Bencf0706e82007-05-05 11:45:53 -0700188 p++;
189 *p = 0;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200190
Johannes Bergd0709a62008-02-25 16:27:46 +0100191 rcu_read_lock();
192
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200193 sta = sta_info_get(local, hdr->addr1);
194 if (sta) {
195 int ampdu_queue = sta->tid_to_tx_q[tid];
196 if ((ampdu_queue < local->hw.queues) &&
Ron Rindjunskya9af2012008-01-30 12:58:45 +0200197 test_bit(ampdu_queue, q->qdisc_pool)) {
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200198 queue = ampdu_queue;
199 pkt_data->flags |= IEEE80211_TXPD_AMPDU;
200 } else {
201 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
202 }
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200203 }
Johannes Bergd0709a62008-02-25 16:27:46 +0100204
205 rcu_read_unlock();
Jiri Bencf0706e82007-05-05 11:45:53 -0700206 }
207
208 if (unlikely(queue >= local->hw.queues)) {
209#if 0
210 if (net_ratelimit()) {
211 printk(KERN_DEBUG "%s - queue=%d (hw does not "
212 "support) -> %d\n",
213 __func__, queue, local->hw.queues - 1);
214 }
215#endif
216 queue = local->hw.queues - 1;
217 }
218
219 if (unlikely(queue < 0)) {
220 kfree_skb(skb);
221 err = NET_XMIT_DROP;
222 } else {
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200223 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
Jiri Bencf0706e82007-05-05 11:45:53 -0700224 pkt_data->queue = (unsigned int) queue;
225 qdisc = q->queues[queue];
226 err = qdisc->enqueue(skb, qdisc);
227 if (err == NET_XMIT_SUCCESS) {
228 qd->q.qlen++;
229 qd->bstats.bytes += skb->len;
230 qd->bstats.packets++;
231 return NET_XMIT_SUCCESS;
232 }
233 }
234 qd->qstats.drops++;
235 return err;
236}
237
238
239/* TODO: clean up the cases where master_hard_start_xmit
240 * returns non 0 - it shouldn't ever do that. Once done we
241 * can remove this function */
242static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
243{
244 struct ieee80211_sched_data *q = qdisc_priv(qd);
245 struct ieee80211_tx_packet_data *pkt_data =
246 (struct ieee80211_tx_packet_data *) skb->cb;
247 struct Qdisc *qdisc;
248 int err;
249
250 /* we recorded which queue to use earlier! */
251 qdisc = q->queues[pkt_data->queue];
252
253 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
254 qd->q.qlen++;
255 return 0;
256 }
257 qd->qstats.drops++;
258 return err;
259}
260
261
262static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
263{
264 struct ieee80211_sched_data *q = qdisc_priv(qd);
265 struct net_device *dev = qd->dev;
266 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
267 struct ieee80211_hw *hw = &local->hw;
268 struct sk_buff *skb;
269 struct Qdisc *qdisc;
270 int queue;
271
272 /* check all the h/w queues in numeric/priority order */
273 for (queue = 0; queue < hw->queues; queue++) {
274 /* see if there is room in this hardware queue */
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200275 if ((test_bit(IEEE80211_LINK_STATE_XOFF,
276 &local->state[queue])) ||
277 (test_bit(IEEE80211_LINK_STATE_PENDING,
278 &local->state[queue])) ||
Ron Rindjunskya9af2012008-01-30 12:58:45 +0200279 (!test_bit(queue, q->qdisc_pool)))
Jiri Bencf0706e82007-05-05 11:45:53 -0700280 continue;
281
282 /* there is space - try and get a frame */
283 skb = skb_dequeue(&q->requeued[queue]);
284 if (skb) {
285 qd->q.qlen--;
286 return skb;
287 }
288
289 qdisc = q->queues[queue];
290 skb = qdisc->dequeue(qdisc);
291 if (skb) {
292 qd->q.qlen--;
293 return skb;
294 }
295 }
296 /* returning a NULL here when all the h/w queues are full means we
297 * never need to call netif_stop_queue in the driver */
298 return NULL;
299}
300
301
302static void wme_qdiscop_reset(struct Qdisc* qd)
303{
304 struct ieee80211_sched_data *q = qdisc_priv(qd);
305 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
306 struct ieee80211_hw *hw = &local->hw;
307 int queue;
308
309 /* QUESTION: should we have some hardware flush functionality here? */
310
311 for (queue = 0; queue < hw->queues; queue++) {
312 skb_queue_purge(&q->requeued[queue]);
313 qdisc_reset(q->queues[queue]);
314 }
315 qd->q.qlen = 0;
316}
317
318
319static void wme_qdiscop_destroy(struct Qdisc* qd)
320{
321 struct ieee80211_sched_data *q = qdisc_priv(qd);
322 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
323 struct ieee80211_hw *hw = &local->hw;
324 int queue;
325
326 tcf_destroy_chain(q->filter_list);
327 q->filter_list = NULL;
328
329 for (queue=0; queue < hw->queues; queue++) {
330 skb_queue_purge(&q->requeued[queue]);
331 qdisc_destroy(q->queues[queue]);
332 q->queues[queue] = &noop_qdisc;
333 }
334}
335
336
/* called whenever parameters are updated on existing qdisc */
/* NOTE(review): options are currently ignored - the static 802.1d
 * mapping is always used; the disabled code below shows the intended
 * netlink-option handling */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
/*	struct ieee80211_sched_data *q = qdisc_priv(qd);
*/
	/* check our options block is the right size */
	/* copy any options to our local structure */
/* Ignore options block for now - always use static mapping
	struct tc_ieee80211_qopt *qopt = nla_data(opt);

	if (opt->nla_len < nla_attr_size(sizeof(*qopt)))
		return -EINVAL;
	memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue));
*/
	return 0;
}
353
354
355/* called during initial creation of qdisc on device */
Patrick McHardy1e904742008-01-22 22:11:17 -0800356static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
Jiri Bencf0706e82007-05-05 11:45:53 -0700357{
358 struct ieee80211_sched_data *q = qdisc_priv(qd);
359 struct net_device *dev = qd->dev;
360 struct ieee80211_local *local;
361 int queues;
362 int err = 0, i;
363
364 /* check that device is a mac80211 device */
365 if (!dev->ieee80211_ptr ||
366 dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
367 return -EINVAL;
368
369 /* check this device is an ieee80211 master type device */
370 if (dev->type != ARPHRD_IEEE80211)
371 return -EINVAL;
372
373 /* check that there is no qdisc currently attached to device
374 * this ensures that we will be the root qdisc. (I can't find a better
375 * way to test this explicitly) */
376 if (dev->qdisc_sleeping != &noop_qdisc)
377 return -EINVAL;
378
379 if (qd->flags & TCQ_F_INGRESS)
380 return -EINVAL;
381
382 local = wdev_priv(dev->ieee80211_ptr);
383 queues = local->hw.queues;
384
385 /* if options were passed in, set them */
386 if (opt) {
387 err = wme_qdiscop_tune(qd, opt);
388 }
389
390 /* create child queues */
391 for (i = 0; i < queues; i++) {
392 skb_queue_head_init(&q->requeued[i]);
393 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
394 qd->handle);
Johannes Berg136e83d2007-09-10 13:55:08 +0200395 if (!q->queues[i]) {
Jiri Bencf0706e82007-05-05 11:45:53 -0700396 q->queues[i] = &noop_qdisc;
Pavel Roskina4278e12008-05-12 09:02:24 -0400397 printk(KERN_ERR "%s child qdisc %i creation failed\n",
398 dev->name, i);
Jiri Bencf0706e82007-05-05 11:45:53 -0700399 }
400 }
401
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200402 /* reserve all legacy QoS queues */
403 for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++)
Ron Rindjunskya9af2012008-01-30 12:58:45 +0200404 set_bit(i, q->qdisc_pool);
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200405
Jiri Bencf0706e82007-05-05 11:45:53 -0700406 return err;
407}
408
/* dump the qdisc's options to netlink - currently a no-op that reports
 * success; the tag2queue dump below is disabled along with the static
 * mapping option handling in wme_qdiscop_tune() */
static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
/*	struct ieee80211_sched_data *q = qdisc_priv(qd);
	unsigned char *p = skb->tail;
	struct tc_ieee80211_qopt opt;

	memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
*/	return skb->len;
/*
nla_put_failure:
	skb_trim(skb, p - skb->data);*/
	return -1;
}
423
424
425static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
426 struct Qdisc *new, struct Qdisc **old)
427{
428 struct ieee80211_sched_data *q = qdisc_priv(qd);
429 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
430 struct ieee80211_hw *hw = &local->hw;
431 unsigned long queue = arg - 1;
432
433 if (queue >= hw->queues)
434 return -EINVAL;
435
436 if (!new)
437 new = &noop_qdisc;
438
439 sch_tree_lock(qd);
440 *old = q->queues[queue];
441 q->queues[queue] = new;
442 qdisc_reset(*old);
443 sch_tree_unlock(qd);
444
445 return 0;
446}
447
448
449static struct Qdisc *
450wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
451{
452 struct ieee80211_sched_data *q = qdisc_priv(qd);
453 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
454 struct ieee80211_hw *hw = &local->hw;
455 unsigned long queue = arg - 1;
456
457 if (queue >= hw->queues)
458 return NULL;
459
460 return q->queues[queue];
461}
462
463
464static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
465{
466 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
467 struct ieee80211_hw *hw = &local->hw;
468 unsigned long queue = TC_H_MIN(classid);
469
470 if (queue - 1 >= hw->queues)
471 return 0;
472
473 return queue;
474}
475
476
static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	/* binding a filter to a class is the same lookup as a plain get */
	return wme_classop_get(qd, classid);
}
482
483
static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
	/* class handles are not refcounted - nothing to release */
}
487
488
489static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
Patrick McHardy1e904742008-01-22 22:11:17 -0800490 struct nlattr **tca, unsigned long *arg)
Jiri Bencf0706e82007-05-05 11:45:53 -0700491{
492 unsigned long cl = *arg;
493 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
494 struct ieee80211_hw *hw = &local->hw;
495
496 if (cl - 1 > hw->queues)
497 return -ENOENT;
498
499 /* TODO: put code to program hardware queue parameters here,
500 * to allow programming from tc command line */
501
502 return 0;
503}
504
505
506/* we don't support deleting hardware queues
507 * when we add WMM-SA support - TSPECs may be deleted here */
508static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
509{
510 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
511 struct ieee80211_hw *hw = &local->hw;
512
513 if (cl - 1 > hw->queues)
514 return -ENOENT;
515 return 0;
516}
517
518
519static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
520 struct sk_buff *skb, struct tcmsg *tcm)
521{
522 struct ieee80211_sched_data *q = qdisc_priv(qd);
523 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
524 struct ieee80211_hw *hw = &local->hw;
525
526 if (cl - 1 > hw->queues)
527 return -ENOENT;
528 tcm->tcm_handle = TC_H_MIN(cl);
529 tcm->tcm_parent = qd->handle;
530 tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
531 return 0;
532}
533
534
535static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
536{
537 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
538 struct ieee80211_hw *hw = &local->hw;
539 int queue;
540
541 if (arg->stop)
542 return;
543
544 for (queue = 0; queue < hw->queues; queue++) {
545 if (arg->count < arg->skip) {
546 arg->count++;
547 continue;
548 }
549 /* we should return classids for our internal queues here
550 * as well as the external ones */
551 if (arg->fn(qd, queue+1, arg) < 0) {
552 arg->stop = 1;
553 break;
554 }
555 arg->count++;
556 }
557}
558
559
560static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
561 unsigned long cl)
562{
563 struct ieee80211_sched_data *q = qdisc_priv(qd);
564
565 if (cl)
566 return NULL;
567
568 return &q->filter_list;
569}
570
571
/* this qdisc is classful (i.e. has classes, some of which may have leaf
 * qdiscs attached) - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	/* class handle management (handles are not refcounted here) */
	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	/* tc filter attachment points */
	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};
591
592
/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};
613
614
/* create the WME qdisc and install it as the device's root qdisc */
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	/* publish the new root qdisc under the qdisc tree lock */
	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}
633
634
/* returns non-zero when the device's root qdisc is our WME qdisc */
int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}
639
640
/* register the "ieee80211" qdisc with the packet scheduler core */
int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}
645
646
/* unregister the "ieee80211" qdisc from the packet scheduler core */
void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200651
652int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
653 struct sta_info *sta, u16 tid)
654{
655 int i;
656 struct ieee80211_sched_data *q =
657 qdisc_priv(local->mdev->qdisc_sleeping);
658 DECLARE_MAC_BUF(mac);
659
660 /* prepare the filter and save it for the SW queue
661 * matching the recieved HW queue */
662
663 /* try to get a Qdisc from the pool */
664 for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++)
Ron Rindjunskya9af2012008-01-30 12:58:45 +0200665 if (!test_and_set_bit(i, q->qdisc_pool)) {
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200666 ieee80211_stop_queue(local_to_hw(local), i);
667 sta->tid_to_tx_q[tid] = i;
668
669 /* IF there are already pending packets
670 * on this tid first we need to drain them
671 * on the previous queue
672 * since HT is strict in order */
673#ifdef CONFIG_MAC80211_HT_DEBUG
674 if (net_ratelimit())
675 printk(KERN_DEBUG "allocated aggregation queue"
Ron Rindjunskya9af2012008-01-30 12:58:45 +0200676 " %d tid %d addr %s pool=0x%lX",
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200677 i, tid, print_mac(mac, sta->addr),
Ron Rindjunskya9af2012008-01-30 12:58:45 +0200678 q->qdisc_pool[0]);
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200679#endif /* CONFIG_MAC80211_HT_DEBUG */
680 return 0;
681 }
682
683 return -EAGAIN;
684}
685
/**
 * release a STA/TID's aggregation queue back to the pool
 * the caller needs to hold local->mdev->queue_lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool */
	clear_bit(agg_queue, q->qdisc_pool);
	/* hw.queues (an out-of-range index) marks "no aggregation queue" */
	sta->tid_to_tx_q[tid] = local->hw.queues;

	/* either reinject pending frames through the root qdisc or drop them */
	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}
706
707void ieee80211_requeue(struct ieee80211_local *local, int queue)
708{
709 struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
710 struct ieee80211_sched_data *q = qdisc_priv(root_qd);
711 struct Qdisc *qdisc = q->queues[queue];
712 struct sk_buff *skb = NULL;
Ron Rindjunsky0da926f2008-04-23 13:45:12 +0300713 u32 len;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200714
715 if (!qdisc || !qdisc->dequeue)
716 return;
717
718 printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
719 for (len = qdisc->q.qlen; len > 0; len--) {
720 skb = qdisc->dequeue(qdisc);
721 root_qd->q.qlen--;
722 /* packet will be classified again and */
723 /* skb->packet_data->queue will be overridden if needed */
724 if (skb)
725 wme_qdiscop_enqueue(skb, root_qd);
726 }
727}