blob: f014cd38c2d0961dda25ba70c19425fec6ec0235 [file] [log] [blame]
Jiri Bencf0706e82007-05-05 11:45:53 -07001/*
2 * Copyright 2004, Instant802 Networks, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/netdevice.h>
10#include <linux/skbuff.h>
11#include <linux/module.h>
12#include <linux/if_arp.h>
13#include <linux/types.h>
14#include <net/ip.h>
15#include <net/pkt_sched.h>
16
17#include <net/mac80211.h>
18#include "ieee80211_i.h"
19#include "wme.h"
20
Jiri Bencf0706e82007-05-05 11:45:53 -070021/* maximum number of hardware queues we support. */
Johannes Berge100bb62008-04-30 18:51:21 +020022#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
23/* current number of hardware queues we support. */
24#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)
Ron Rindjunsky9e723492008-01-28 14:07:18 +020025
/*
 * Default mapping in classifier to work with default
 * queue setup: index is the 802.1d priority (0..7), value is the
 * WMM access category / hardware queue (0 = highest priority AC).
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
Jiri Bencf0706e82007-05-05 11:45:53 -070031
/* per-qdisc private state for the mac80211 WME scheduler */
struct ieee80211_sched_data
{
	/* bitmap of child qdiscs in use; aggregation queues are
	 * allocated/released by (test_and_)set_bit/clear_bit */
	unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
	/* user-installed tc classifier chain (may be NULL) */
	struct tcf_proto *filter_list;
	/* one child qdisc per hardware/aggregation queue */
	struct Qdisc *queues[QD_MAX_QUEUES];
	/* per-queue holding lists for frames requeued by the driver */
	struct sk_buff_head requeued[QD_MAX_QUEUES];
};
39
Guy Cohena8bdf292008-01-09 19:12:48 +020040static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
Jiri Bencf0706e82007-05-05 11:45:53 -070041
42/* given a data frame determine the 802.1p/1d tag to use */
/* given a data frame determine the 802.1p/1d tag to use
 *
 * Order of precedence: user tc filter result, magic skb->priority
 * values 256..263, then the IPv4 DSCP field; anything else maps to 0.
 */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };	/* -1 class == "no match" */

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present: the frame must be
	 * long enough for LLC/SNAP + IP header right after the 802.11
	 * header, and the LLC/SNAP must announce IPv4 */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

	/* take the top 6 bits (DSCP); only pure class-selector
	 * codepoints (bits 4..2 clear) are mapped, others get 0 */
	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}
79
80
81static inline int wme_downgrade_ac(struct sk_buff *skb)
82{
83 switch (skb->priority) {
84 case 6:
85 case 7:
86 skb->priority = 5; /* VO -> VI */
87 return 0;
88 case 4:
89 case 5:
90 skb->priority = 3; /* VI -> BE */
91 return 0;
92 case 0:
93 case 3:
94 skb->priority = 2; /* BE -> BK */
95 return 0;
96 default:
97 return -1;
98 }
99}
100
101
102/* positive return value indicates which queue to use
103 * negative return value indicates to drop the frame */
/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame
 *
 * Side effect: for QoS data frames skb->priority is (re)written with
 * the chosen 802.1d tag, possibly downgraded to respect ACM bits.
 */
static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data(hdr->frame_control)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return 0;
	}

	if (0 /* injected */) {
		/* use AC from radiotap */
		/* placeholder: injected-frame support not implemented */
	}

	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client verify acm is not set for this ac */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}
139
140
/* Root enqueue: classify the frame onto a hardware (or aggregation)
 * queue and hand it to the matching child qdisc.
 *
 * Frames flagged IEEE80211_TX_CTL_REQUEUE were already transmitted
 * once (driver pushed them back); they keep their recorded queue
 * mapping and go on the per-queue requeued list so they are sent
 * ahead of freshly classified traffic.
 *
 * Returns NET_XMIT_SUCCESS, NET_XMIT_DROP, or the child's error. */
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct Qdisc *qdisc;
	struct sta_info *sta;
	int err, queue;
	u8 tid;

	if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
		queue = skb_get_queue_mapping(skb);
		/* RCU protects the sta_info lookup below */
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			/* redirect to the TID's aggregation queue if one
			 * is allocated and still marked in-use */
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	/* may return -1, meaning "drop" - checked further below */
	queue = classify80211(skb, qd);

	/* clamp to the last real hardware queue; aggregation queues are
	 * only selected via tid_to_tx_q below */
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/* now we know the 1d priority, fill in the QoS header if there is one
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p++ = ack_policy | tid;
		*p = 0;

		rcu_read_lock();

		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}

		rcu_read_unlock();
	}

	if (unlikely(queue < 0)) {
		/* classify80211 said drop (all permissible ACs have ACM) */
		kfree_skb(skb);
		err = NET_XMIT_DROP;
	} else {
		/* remember the queue for the requeue path above */
		skb_set_queue_mapping(skb, queue);
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}
226
227
228/* TODO: clean up the cases where master_hard_start_xmit
229 * returns non 0 - it shouldn't ever do that. Once done we
230 * can remove this function */
231static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
232{
233 struct ieee80211_sched_data *q = qdisc_priv(qd);
Jiri Bencf0706e82007-05-05 11:45:53 -0700234 struct Qdisc *qdisc;
235 int err;
236
237 /* we recorded which queue to use earlier! */
Johannes Berge2530082008-05-17 00:57:14 +0200238 qdisc = q->queues[skb_get_queue_mapping(skb)];
Jiri Bencf0706e82007-05-05 11:45:53 -0700239
240 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
241 qd->q.qlen++;
242 return 0;
243 }
244 qd->qstats.drops++;
245 return err;
246}
247
248
/* Root dequeue: scan queues in priority order (0 = highest) and pull
 * from the first queue that is neither stopped by the driver nor
 * unallocated; requeued frames go out before the child qdisc's. */
static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qdisc_dev(qd);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < QD_NUM(hw); queue++) {
		/* see if there is room in this hardware queue */
		if (__netif_subqueue_stopped(local->mdev, queue) ||
		    !test_bit(queue, q->qdisc_pool))
			continue;

		/* there is space - try and get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning a NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}
284
285
286static void wme_qdiscop_reset(struct Qdisc* qd)
287{
288 struct ieee80211_sched_data *q = qdisc_priv(qd);
David S. Miller5ce2d482008-07-08 17:06:30 -0700289 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700290 struct ieee80211_hw *hw = &local->hw;
291 int queue;
292
293 /* QUESTION: should we have some hardware flush functionality here? */
294
Johannes Berge100bb62008-04-30 18:51:21 +0200295 for (queue = 0; queue < QD_NUM(hw); queue++) {
Jiri Bencf0706e82007-05-05 11:45:53 -0700296 skb_queue_purge(&q->requeued[queue]);
297 qdisc_reset(q->queues[queue]);
298 }
299 qd->q.qlen = 0;
300}
301
302
303static void wme_qdiscop_destroy(struct Qdisc* qd)
304{
305 struct ieee80211_sched_data *q = qdisc_priv(qd);
David S. Miller5ce2d482008-07-08 17:06:30 -0700306 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700307 struct ieee80211_hw *hw = &local->hw;
308 int queue;
309
Patrick McHardyff31ab52008-07-01 19:52:38 -0700310 tcf_destroy_chain(&q->filter_list);
Jiri Bencf0706e82007-05-05 11:45:53 -0700311
Johannes Berge100bb62008-04-30 18:51:21 +0200312 for (queue = 0; queue < QD_NUM(hw); queue++) {
Jiri Bencf0706e82007-05-05 11:45:53 -0700313 skb_queue_purge(&q->requeued[queue]);
314 qdisc_destroy(q->queues[queue]);
315 q->queues[queue] = &noop_qdisc;
316 }
317}
318
319
/* called whenever parameters are updated on existing qdisc
 * (currently a no-op stub: no tunable options are implemented) */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	return 0;
}
325
326
/* called during initial creation of qdisc on device
 *
 * Rejects anything that is not the root egress qdisc of a mac80211
 * master device, then creates one pfifo child per queue and marks the
 * non-aggregation queues as permanently in use.  Returns 0 or the
 * error from option parsing; child-creation failures only log and
 * fall back to noop_qdisc. */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qdisc_dev(qd);
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	int err = 0, i;

	/* check that device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	hw = &local->hw;

	/* only allow on master dev */
	if (dev != local->mdev)
		return -EINVAL;

	/* ensure that we are root qdisc */
	if (qd->parent != TC_H_ROOT)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	/* if options were passed in, set them */
	if (opt)
		err = wme_qdiscop_tune(qd, opt);

	/* create child queues */
	for (i = 0; i < QD_NUM(hw); i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qdisc_dev(qd), qd->dev_queue,
						 &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			/* fall back to a discard-everything child */
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	/* non-aggregation queues: reserve/mark as used */
	for (i = 0; i < local->hw.queues; i++)
		set_bit(i, q->qdisc_pool);

	return err;
}
378
/* netlink dump of qdisc options: not supported, always signal failure */
static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	return -1;
}
383
384
385static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
386 struct Qdisc *new, struct Qdisc **old)
387{
388 struct ieee80211_sched_data *q = qdisc_priv(qd);
David S. Miller5ce2d482008-07-08 17:06:30 -0700389 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700390 struct ieee80211_hw *hw = &local->hw;
391 unsigned long queue = arg - 1;
392
Johannes Berge100bb62008-04-30 18:51:21 +0200393 if (queue >= QD_NUM(hw))
Jiri Bencf0706e82007-05-05 11:45:53 -0700394 return -EINVAL;
395
396 if (!new)
397 new = &noop_qdisc;
398
399 sch_tree_lock(qd);
400 *old = q->queues[queue];
401 q->queues[queue] = new;
402 qdisc_reset(*old);
403 sch_tree_unlock(qd);
404
405 return 0;
406}
407
408
409static struct Qdisc *
410wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
411{
412 struct ieee80211_sched_data *q = qdisc_priv(qd);
David S. Miller5ce2d482008-07-08 17:06:30 -0700413 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700414 struct ieee80211_hw *hw = &local->hw;
415 unsigned long queue = arg - 1;
416
Johannes Berge100bb62008-04-30 18:51:21 +0200417 if (queue >= QD_NUM(hw))
Jiri Bencf0706e82007-05-05 11:45:53 -0700418 return NULL;
419
420 return q->queues[queue];
421}
422
423
424static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
425{
David S. Miller5ce2d482008-07-08 17:06:30 -0700426 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700427 struct ieee80211_hw *hw = &local->hw;
428 unsigned long queue = TC_H_MIN(classid);
429
Johannes Berge100bb62008-04-30 18:51:21 +0200430 if (queue - 1 >= QD_NUM(hw))
Jiri Bencf0706e82007-05-05 11:45:53 -0700431 return 0;
432
433 return queue;
434}
435
436
/* Bind a filter to a class: no extra state needed, so binding is
 * just a lookup. */
static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}
442
443
/* Release a class reference: classes are static, nothing to do. */
static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}
447
448
449static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
Patrick McHardy1e904742008-01-22 22:11:17 -0800450 struct nlattr **tca, unsigned long *arg)
Jiri Bencf0706e82007-05-05 11:45:53 -0700451{
452 unsigned long cl = *arg;
David S. Miller5ce2d482008-07-08 17:06:30 -0700453 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700454 struct ieee80211_hw *hw = &local->hw;
455
Johannes Berge100bb62008-04-30 18:51:21 +0200456 if (cl - 1 > QD_NUM(hw))
Jiri Bencf0706e82007-05-05 11:45:53 -0700457 return -ENOENT;
458
459 /* TODO: put code to program hardware queue parameters here,
460 * to allow programming from tc command line */
461
462 return 0;
463}
464
465
466/* we don't support deleting hardware queues
467 * when we add WMM-SA support - TSPECs may be deleted here */
468static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
469{
David S. Miller5ce2d482008-07-08 17:06:30 -0700470 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700471 struct ieee80211_hw *hw = &local->hw;
472
Johannes Berge100bb62008-04-30 18:51:21 +0200473 if (cl - 1 > QD_NUM(hw))
Jiri Bencf0706e82007-05-05 11:45:53 -0700474 return -ENOENT;
475 return 0;
476}
477
478
479static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
480 struct sk_buff *skb, struct tcmsg *tcm)
481{
482 struct ieee80211_sched_data *q = qdisc_priv(qd);
David S. Miller5ce2d482008-07-08 17:06:30 -0700483 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700484 struct ieee80211_hw *hw = &local->hw;
485
Johannes Berge100bb62008-04-30 18:51:21 +0200486 if (cl - 1 > QD_NUM(hw))
Jiri Bencf0706e82007-05-05 11:45:53 -0700487 return -ENOENT;
488 tcm->tcm_handle = TC_H_MIN(cl);
489 tcm->tcm_parent = qd->handle;
490 tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
491 return 0;
492}
493
494
495static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
496{
David S. Miller5ce2d482008-07-08 17:06:30 -0700497 struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -0700498 struct ieee80211_hw *hw = &local->hw;
499 int queue;
500
501 if (arg->stop)
502 return;
503
Johannes Berge100bb62008-04-30 18:51:21 +0200504 for (queue = 0; queue < QD_NUM(hw); queue++) {
Jiri Bencf0706e82007-05-05 11:45:53 -0700505 if (arg->count < arg->skip) {
506 arg->count++;
507 continue;
508 }
509 /* we should return classids for our internal queues here
510 * as well as the external ones */
511 if (arg->fn(qd, queue+1, arg) < 0) {
512 arg->stop = 1;
513 break;
514 }
515 arg->count++;
516 }
517}
518
519
520static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
521 unsigned long cl)
522{
523 struct ieee80211_sched_data *q = qdisc_priv(qd);
524
525 if (cl)
526 return NULL;
527
528 return &q->filter_list;
529}
530
531
/* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
 * - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	/* filter management: filters attach to the qdisc only */
	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};
551
552
/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	/* .change shares the stub used by .init for option parsing */
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};
573
574
/* Install the WME qdisc as the root qdisc of the device's first TX
 * queue.  Failure only logs: the device keeps its previous qdisc. */
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, txq,
				  &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	/* publish under the tree lock */
	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &txq->qdisc_list);
	txq->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}
595
596
597int ieee80211_qdisc_installed(struct net_device *dev)
598{
David S. Millere8a04642008-07-17 00:34:19 -0700599 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
David S. Millerb0e1e642008-07-08 17:42:10 -0700600
601 return txq->qdisc_sleeping->ops == &wme_qdisc_ops;
Jiri Bencf0706e82007-05-05 11:45:53 -0700602}
603
604
/* Register the "ieee80211" qdisc with the packet scheduler core;
 * returns 0 or the register_qdisc() error. */
int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}
609
610
/* Unregister the "ieee80211" qdisc from the packet scheduler core. */
void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200615
/* Allocate a free aggregation queue for (sta, tid).
 *
 * Scans the aggregation range of the qdisc pool and atomically claims
 * the first free slot, stopping that hardware queue and recording the
 * mapping in sta->tid_to_tx_q.  Returns 0 on success, -EPERM when the
 * hardware has no aggregation queues, -EAGAIN when all are in use. */
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
	struct ieee80211_sched_data *q =
		qdisc_priv(txq->qdisc_sleeping);
	DECLARE_MAC_BUF(mac);

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
		if (!test_and_set_bit(i, q->qdisc_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* IF there are already pending packets
			 * on this tid first we need to drain them
			 * on the previous queue
			 * since HT is strict in order */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}
653
/**
 * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
 *
 * Release the aggregation queue owned by (sta, tid): return the slot
 * to the pool and invalidate the mapping (QD_NUM(hw) is an
 * out-of-range sentinel).  With @requeue the pending frames are fed
 * back through the root qdisc for reclassification; otherwise the
 * child qdisc is simply reset, discarding them.
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_hw *hw = &local->hw;
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
	struct ieee80211_sched_data *q =
		qdisc_priv(txq->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool */
	clear_bit(agg_queue, q->qdisc_pool);
	sta->tid_to_tx_q[tid] = QD_NUM(hw);

	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}
676
/* Drain child qdisc `queue` and push every frame back through the
 * root enqueue so it is classified onto its (possibly new) queue. */
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
	struct Qdisc *root_qd = txq->qdisc_sleeping;
	struct ieee80211_sched_data *q = qdisc_priv(root_qd);
	struct Qdisc *qdisc = q->queues[queue];
	struct sk_buff *skb = NULL;
	u32 len;

	if (!qdisc || !qdisc->dequeue)
		return;

	/* snapshot the length: wme_qdiscop_enqueue below grows the
	 * child queues again, so we must not loop on the live count */
	for (len = qdisc->q.qlen; len > 0; len--) {
		skb = qdisc->dequeue(qdisc);
		/* NOTE(review): root qlen is decremented even when
		 * dequeue returned NULL - looks like it could drift if
		 * the child's qlen overstates its contents; confirm */
		root_qd->q.qlen--;
		/* packet will be classified again and */
		/* skb->packet_data->queue will be overridden if needed */
		if (skb)
			wme_qdiscop_enqueue(skb, root_qd);
	}
}
697}