/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* maximum number of hardware queues we support. */
#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
/* current number of hardware queues we support. */
#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)

/*
 * Default mapping in classifier to work with default
 * queue setup.
 */
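/* 802.1d user priority (0-7) -> AC queue index; queue 0 (voice) is the
 * highest priority hardware queue and is serviced first by the dequeue
 * loop, queue 3 (background) is the lowest. */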
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

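/* per-qdisc private data: one child qdisc and one requeue list per hardware
 * queue (including aggregation queues), plus a bitmap of queues in use */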
struct ieee80211_sched_data
{
	unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
	struct tcf_proto *filter_list;
	struct Qdisc *queues[QD_MAX_QUEUES];
	struct sk_buff_head requeued[QD_MAX_QUEUES];
};

static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* given a data frame determine the 802.1p/1d tag to use */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}


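/* downgrade the 802.1d priority to the next lower access category
 * (VO -> VI -> BE -> BK); returns -1 once the frame is already in
 * the background AC and cannot be downgraded any further */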
static inline int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 2; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}


/* non-negative return value indicates which queue to use
 * negative return value indicates to drop the frame */
static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	int qos;

	/* see if frame is data or non data frame */
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return 0;
	}

	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	/* is this a QoS frame? */
	qos = fc & IEEE80211_STYPE_QOS_DATA;

	if (!qos) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client verify acm is not set for this ac */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}


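/* root enqueue: classify the frame onto an AC queue, fill in the QoS
 * control field for QoS data frames, and divert the frame to a reserved
 * aggregation queue when the destination STA has one set up for the TID */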
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	struct Qdisc *qdisc;
	struct sta_info *sta;
	int err;
	u16 queue;
	u8 tid;

	if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
		queue = pkt_data->queue;
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				pkt_data->flags |= IEEE80211_TXPD_AMPDU;
			} else {
				pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/* now we know the 1d priority, fill in the QoS header if
	 * there is one */
	if (WLAN_FC_IS_QOS_DATA(fc)) {
		u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
		u8 ack_policy = 0;
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p = ack_policy | tid;
		p++;
		*p = 0;

		rcu_read_lock();

		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				pkt_data->flags |= IEEE80211_TXPD_AMPDU;
			} else {
				pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
			}
		}

		rcu_read_unlock();
	}

	tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
	pkt_data->queue = (unsigned int) queue;
	qdisc = q->queues[queue];
	err = qdisc->enqueue(skb, qdisc);
	if (err == NET_XMIT_SUCCESS) {
		qd->q.qlen++;
		qd->bstats.bytes += skb->len;
		qd->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}
	qd->qstats.drops++;
	return err;
}


/* TODO: clean up the cases where master_hard_start_xmit
 * returns non 0 - it shouldn't ever do that. Once done we
 * can remove this function */
static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct Qdisc *qdisc;
	int err;

	/* we recorded which queue to use earlier! */
	qdisc = q->queues[pkt_data->queue];

	if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
		qd->q.qlen++;
		return 0;
	}
	qd->qstats.drops++;
	return err;
}


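/* root dequeue: scan the hardware queues in priority order, skip queues
 * that are stopped, pending, or not reserved in the pool, and hand out
 * requeued frames before pulling fresh ones from the child qdisc */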
static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < QD_NUM(hw); queue++) {
		/* see if there is room in this hardware queue */
		if ((test_bit(IEEE80211_LINK_STATE_XOFF,
				&local->state[queue])) ||
		    (test_bit(IEEE80211_LINK_STATE_PENDING,
				&local->state[queue])) ||
		    (!test_bit(queue, q->qdisc_pool)))
			continue;

		/* there is space - try and get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning a NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}


static void wme_qdiscop_reset(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	/* QUESTION: should we have some hardware flush functionality here? */

	for (queue = 0; queue < QD_NUM(hw); queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_reset(q->queues[queue]);
	}
	qd->q.qlen = 0;
}


static void wme_qdiscop_destroy(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	tcf_destroy_chain(q->filter_list);
	q->filter_list = NULL;

	for (queue = 0; queue < QD_NUM(hw); queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_destroy(q->queues[queue]);
		q->queues[queue] = &noop_qdisc;
	}
}


/* called whenever parameters are updated on existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	return 0;
}


/* called during initial creation of qdisc on device */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	int err = 0, i;

	/* check that device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	hw = &local->hw;

	/* only allow on master dev */
	if (dev != local->mdev)
		return -EINVAL;

	/* ensure that we are root qdisc */
	if (qd->parent != TC_H_ROOT)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	/* if options were passed in, set them */
	if (opt)
		err = wme_qdiscop_tune(qd, opt);

	/* create child queues */
	for (i = 0; i < QD_NUM(hw); i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	/* non-aggregation queues: reserve/mark as used */
	for (i = 0; i < local->hw.queues; i++)
		set_bit(i, q->qdisc_pool);

	return err;
}

static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	return -1;
}


static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
			     struct Qdisc *new, struct Qdisc **old)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= QD_NUM(hw))
		return -EINVAL;

	if (!new)
		new = &noop_qdisc;

	sch_tree_lock(qd);
	*old = q->queues[queue];
	q->queues[queue] = new;
	qdisc_reset(*old);
	sch_tree_unlock(qd);

	return 0;
}


static struct Qdisc *
wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= QD_NUM(hw))
		return NULL;

	return q->queues[queue];
}


static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = TC_H_MIN(classid);

	if (queue - 1 >= QD_NUM(hw))
		return 0;

	return queue;
}


static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}


static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}


static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
			      struct nlattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > QD_NUM(hw))
		return -ENOENT;

	/* TODO: put code to program hardware queue parameters here,
	 * to allow programming from tc command line */

	return 0;
}


/* we don't support deleting hardware queues
 * when we add WMM-SA support - TSPECs may be deleted here */
static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > QD_NUM(hw))
		return -ENOENT;
	return 0;
}


static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
				  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > QD_NUM(hw))
		return -ENOENT;
	tcm->tcm_handle = TC_H_MIN(cl);
	tcm->tcm_parent = qd->handle;
	tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
	return 0;
}


static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	if (arg->stop)
		return;

	for (queue = 0; queue < QD_NUM(hw); queue++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* we should return classids for our internal queues here
		 * as well as the external ones */
		if (arg->fn(qd, queue+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}


static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
						unsigned long cl)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);

	if (cl)
		return NULL;

	return &q->filter_list;
}


/* this qdisc is classful (i.e. has classes, some of which may have leaf
 * qdiscs attached) - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};


/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};


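/* create the ieee80211 root qdisc on the mac80211 master device, give it a
 * fixed handle and attach it as the device's sleeping qdisc */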
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}


int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}


int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}


void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}

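/* reserve a free aggregation queue from the pool for the given STA/TID:
 * the hardware queue is stopped and the TID is mapped to it so that the
 * enqueue path diverts subsequent frames there */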
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	DECLARE_MAC_BUF(mac);

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
		if (!test_and_set_bit(i, q->qdisc_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* if there are already pending packets on this
			 * tid, we first need to drain them from the
			 * previous queue, since HT requires strict
			 * ordering */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
				       " %d tid %d addr %s pool=0x%lX\n",
				       i, tid, print_mac(mac, sta->addr),
				       q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}

/*
 * The caller needs to hold local->mdev->queue_lock.
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool */
	clear_bit(agg_queue, q->qdisc_pool);
	sta->tid_to_tx_q[tid] = QD_NUM(hw);

	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}

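/* drain a (formerly aggregation) queue back through the root qdisc so that
 * each frame is classified again and lands on its normal AC queue */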
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
	struct ieee80211_sched_data *q = qdisc_priv(root_qd);
	struct Qdisc *qdisc = q->queues[queue];
	struct sk_buff *skb = NULL;
	u32 len;

	if (!qdisc || !qdisc->dequeue)
		return;

	printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
	for (len = qdisc->q.qlen; len > 0; len--) {
		skb = qdisc->dequeue(qdisc);
		root_qd->q.qlen--;
		/* packet will be classified again and */
		/* skb->packet_data->queue will be overridden if needed */
		if (skb)
			wme_qdiscop_enqueue(skb, root_qd);
	}
}