/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

8
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"
20
/* Default mapping in classifier to work with default
 * queue setup.
 *
 * Index is the 802.1d priority tag (0-7); the value is the hardware
 * queue / access category. Queue 0 is the AC_VO (voice) queue, which is
 * also where management frames go (see classify80211()); larger values
 * are lower-priority queues (tags 1/2 -> queue 3, background).
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

/* NOTE(review): unused in this file chunk; the byte pattern looks like an
 * LLC/SNAP header whose trailing 0x08 0x00 is the IPv4 EtherType —
 * confirm against any external users before removing. */
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
Jiri Bencf0706e82007-05-05 11:45:53 -070027
David S. Miller51cb6db2008-07-15 03:34:57 -070028/* Given a data frame determine the 802.1p/1d tag to use. */
29static unsigned int classify_1d(struct sk_buff *skb)
Jiri Bencf0706e82007-05-05 11:45:53 -070030{
David S. Miller51cb6db2008-07-15 03:34:57 -070031 unsigned int dscp;
Jiri Bencf0706e82007-05-05 11:45:53 -070032
33 /* skb->priority values from 256->263 are magic values to
David S. Miller51cb6db2008-07-15 03:34:57 -070034 * directly indicate a specific 802.1d priority. This is used
35 * to allow 802.1d priority to be passed directly in from VLAN
36 * tags, etc.
37 */
Jiri Bencf0706e82007-05-05 11:45:53 -070038 if (skb->priority >= 256 && skb->priority <= 263)
39 return skb->priority - 256;
40
David S. Miller51cb6db2008-07-15 03:34:57 -070041 switch (skb->protocol) {
Arnaldo Carvalho de Melo60678042008-09-20 22:20:49 -070042 case htons(ETH_P_IP):
David S. Miller51cb6db2008-07-15 03:34:57 -070043 dscp = ip_hdr(skb)->tos & 0xfc;
44 break;
45
46 default:
Jiri Bencf0706e82007-05-05 11:45:53 -070047 return 0;
David S. Miller51cb6db2008-07-15 03:34:57 -070048 }
Jiri Bencf0706e82007-05-05 11:45:53 -070049
Jiri Bencf0706e82007-05-05 11:45:53 -070050 return dscp >> 5;
51}
52
53
David S. Miller51cb6db2008-07-15 03:34:57 -070054static int wme_downgrade_ac(struct sk_buff *skb)
Jiri Bencf0706e82007-05-05 11:45:53 -070055{
56 switch (skb->priority) {
57 case 6:
58 case 7:
59 skb->priority = 5; /* VO -> VI */
60 return 0;
61 case 4:
62 case 5:
63 skb->priority = 3; /* VI -> BE */
64 return 0;
65 case 0:
66 case 3:
67 skb->priority = 2; /* BE -> BK */
68 return 0;
69 default:
70 return -1;
71 }
72}
73
74
David S. Miller51cb6db2008-07-15 03:34:57 -070075/* Indicate which queue to use. */
76static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
Jiri Bencf0706e82007-05-05 11:45:53 -070077{
David S. Miller51cb6db2008-07-15 03:34:57 -070078 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
Jiri Bencf0706e82007-05-05 11:45:53 -070079 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
Jiri Bencf0706e82007-05-05 11:45:53 -070080
Harvey Harrison002aaf42008-06-11 14:21:59 -070081 if (!ieee80211_is_data(hdr->frame_control)) {
Jiri Bencf0706e82007-05-05 11:45:53 -070082 /* management frames go on AC_VO queue, but are sent
83 * without QoS control fields */
Johannes Berge100bb62008-04-30 18:51:21 +020084 return 0;
Jiri Bencf0706e82007-05-05 11:45:53 -070085 }
86
Johannes Bergf9d540e2007-09-28 14:02:09 +020087 if (0 /* injected */) {
88 /* use AC from radiotap */
Jiri Bencf0706e82007-05-05 11:45:53 -070089 }
90
Harvey Harrison002aaf42008-06-11 14:21:59 -070091 if (!ieee80211_is_data_qos(hdr->frame_control)) {
Jiri Bencf0706e82007-05-05 11:45:53 -070092 skb->priority = 0; /* required for correct WPA/11i MIC */
93 return ieee802_1d_to_ac[skb->priority];
94 }
95
96 /* use the data classifier to determine what 802.1d tag the
Johannes Berg3c3b00c2007-08-28 17:01:55 -040097 * data frame has */
David S. Miller51cb6db2008-07-15 03:34:57 -070098 skb->priority = classify_1d(skb);
Jiri Bencf0706e82007-05-05 11:45:53 -070099
Johannes Berg3c3b00c2007-08-28 17:01:55 -0400100 /* in case we are a client verify acm is not set for this ac */
Jiri Bencf0706e82007-05-05 11:45:53 -0700101 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
102 if (wme_downgrade_ac(skb)) {
David S. Miller51cb6db2008-07-15 03:34:57 -0700103 /* The old code would drop the packet in this
104 * case.
105 */
106 return 0;
Jiri Bencf0706e82007-05-05 11:45:53 -0700107 }
108 }
109
110 /* look up which queue to use for frames with this 1d tag */
111 return ieee802_1d_to_ac[skb->priority];
112}
113
David S. Miller51cb6db2008-07-15 03:34:57 -0700114u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
Jiri Bencf0706e82007-05-05 11:45:53 -0700115{
Jiri Bencf0706e82007-05-05 11:45:53 -0700116 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
David S. Miller51cb6db2008-07-15 03:34:57 -0700117 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
118 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200119 struct sta_info *sta;
David S. Miller51cb6db2008-07-15 03:34:57 -0700120 u16 queue;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200121 u8 tid;
Jiri Bencf0706e82007-05-05 11:45:53 -0700122
David S. Miller51cb6db2008-07-15 03:34:57 -0700123 queue = classify80211(skb, dev);
124 if (unlikely(queue >= local->hw.queues))
125 queue = local->hw.queues - 1;
126
Johannes Berge039fa42008-05-15 12:55:29 +0200127 if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
Johannes Bergd0709a62008-02-25 16:27:46 +0100128 rcu_read_lock();
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200129 sta = sta_info_get(local, hdr->addr1);
Harvey Harrison238f74a2008-07-02 11:05:34 -0700130 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200131 if (sta) {
David S. Miller51cb6db2008-07-15 03:34:57 -0700132 struct ieee80211_hw *hw = &local->hw;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200133 int ampdu_queue = sta->tid_to_tx_q[tid];
David S. Miller51cb6db2008-07-15 03:34:57 -0700134
135 if ((ampdu_queue < ieee80211_num_queues(hw)) &&
136 test_bit(ampdu_queue, local->queue_pool)) {
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200137 queue = ampdu_queue;
Johannes Berge039fa42008-05-15 12:55:29 +0200138 info->flags |= IEEE80211_TX_CTL_AMPDU;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200139 } else {
Johannes Berge039fa42008-05-15 12:55:29 +0200140 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200141 }
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200142 }
Johannes Bergd0709a62008-02-25 16:27:46 +0100143 rcu_read_unlock();
David S. Miller51cb6db2008-07-15 03:34:57 -0700144
145 return queue;
Jiri Bencf0706e82007-05-05 11:45:53 -0700146 }
147
David S. Miller51cb6db2008-07-15 03:34:57 -0700148 /* Now we know the 1d priority, fill in the QoS header if
149 * there is one.
Jiri Bencf0706e82007-05-05 11:45:53 -0700150 */
Harvey Harrison002aaf42008-06-11 14:21:59 -0700151 if (ieee80211_is_data_qos(hdr->frame_control)) {
152 u8 *p = ieee80211_get_qos_ctl(hdr);
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200153 u8 ack_policy = 0;
Harvey Harrison238f74a2008-07-02 11:05:34 -0700154 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
Jiri Bencf0706e82007-05-05 11:45:53 -0700155 if (local->wifi_wme_noack_test)
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200156 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
Jiri Bencf0706e82007-05-05 11:45:53 -0700157 QOS_CONTROL_ACK_POLICY_SHIFT;
158 /* qos header is 2 bytes, second reserved */
Harvey Harrison002aaf42008-06-11 14:21:59 -0700159 *p++ = ack_policy | tid;
Jiri Bencf0706e82007-05-05 11:45:53 -0700160 *p = 0;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200161
Johannes Bergd0709a62008-02-25 16:27:46 +0100162 rcu_read_lock();
163
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200164 sta = sta_info_get(local, hdr->addr1);
165 if (sta) {
166 int ampdu_queue = sta->tid_to_tx_q[tid];
David S. Miller51cb6db2008-07-15 03:34:57 -0700167 struct ieee80211_hw *hw = &local->hw;
168
169 if ((ampdu_queue < ieee80211_num_queues(hw)) &&
170 test_bit(ampdu_queue, local->queue_pool)) {
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200171 queue = ampdu_queue;
Johannes Berge039fa42008-05-15 12:55:29 +0200172 info->flags |= IEEE80211_TX_CTL_AMPDU;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200173 } else {
Johannes Berge039fa42008-05-15 12:55:29 +0200174 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200175 }
Ron Rindjunsky9e723492008-01-28 14:07:18 +0200176 }
Johannes Bergd0709a62008-02-25 16:27:46 +0100177
178 rcu_read_unlock();
Jiri Bencf0706e82007-05-05 11:45:53 -0700179 }
180
Jiri Bencf0706e82007-05-05 11:45:53 -0700181 return queue;
182}
183
/*
 * Allocate a software aggregation queue for @sta/@tid from the pool of
 * A-MPDU queues (indices [hw.queues, ieee80211_num_queues(hw))).
 *
 * Returns 0 on success, -EPERM when aggregation queues are unavailable,
 * -EAGAIN when the pool is exhausted.
 *
 * NOTE: everything below the first return is intentionally dead code —
 * the feature is disabled (see the XXX) until the cb/requeue usage is
 * reworked. Do not "clean up" the unreachable code without re-enabling.
 */
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;

	/* XXX: currently broken due to cb/requeue use */
	return -EPERM;

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
		if (!test_and_set_bit(i, local->queue_pool)) {
			/* stop the queue while it is being set up */
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* IF there are already pending packets
			 * on this tid first we need to drain them
			 * on the previous queue
			 * since HT is strict in order */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit()) {
				DECLARE_MAC_BUF(mac);
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->sta.addr),
					local->queue_pool[0]);
			}
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}
222
/**
 * Release the aggregation queue previously allocated for @sta/@tid back
 * to the pool. If @requeue is set, pending frames are re-classified and
 * re-enqueued via ieee80211_requeue(); otherwise the qdisc is reset and
 * its pending frames are dropped.
 *
 * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	int agg_queue = sta->tid_to_tx_q[tid];
	struct ieee80211_hw *hw = &local->hw;

	/* return the qdisc to the pool */
	clear_bit(agg_queue, local->queue_pool);
	/* ieee80211_num_queues(hw) is out of range for any real queue,
	 * i.e. it marks the tid as having no aggregation queue */
	sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);

	if (requeue) {
		ieee80211_requeue(local, agg_queue);
	} else {
		struct netdev_queue *txq;
		spinlock_t *root_lock;
		struct Qdisc *q;

		txq = netdev_get_tx_queue(local->mdev, agg_queue);
		q = rcu_dereference(txq->qdisc);
		root_lock = qdisc_lock(q);

		/* drop everything still queued on the aggregation queue */
		spin_lock_bh(root_lock);
		qdisc_reset(q);
		spin_unlock_bh(root_lock);
	}
}
253
/*
 * Drain every frame pending on @queue and re-enqueue each one on the
 * queue ieee80211_select_queue() now picks for it (used when an
 * aggregation queue is torn down and its frames must go back to the
 * regular queues).
 *
 * Locking: the whole operation runs under rcu_read_lock_bh(); each
 * qdisc is only touched while holding its root lock, and the drain and
 * re-enqueue phases use separate lock sections since source and
 * destination qdiscs may differ.
 */
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
	struct sk_buff_head list;
	spinlock_t *root_lock;
	struct Qdisc *qdisc;
	u32 len;

	rcu_read_lock_bh();

	qdisc = rcu_dereference(txq->qdisc);
	if (!qdisc || !qdisc->dequeue)
		goto out_unlock;

	/* temporary local holding list for the drained frames */
	skb_queue_head_init(&list);

	/* phase 1: pull everything off the source qdisc */
	root_lock = qdisc_root_lock(qdisc);
	spin_lock(root_lock);
	for (len = qdisc->q.qlen; len > 0; len--) {
		struct sk_buff *skb = qdisc->dequeue(qdisc);

		if (skb)
			__skb_queue_tail(&list, skb);
	}
	spin_unlock(root_lock);

	/* phase 2: re-classify and enqueue on the new queues */
	for (len = list.qlen; len > 0; len--) {
		struct sk_buff *skb = __skb_dequeue(&list);
		u16 new_queue;

		BUG_ON(!skb);
		new_queue = ieee80211_select_queue(local->mdev, skb);
		skb_set_queue_mapping(skb, new_queue);

		txq = netdev_get_tx_queue(local->mdev, new_queue);


		qdisc = rcu_dereference(txq->qdisc);
		root_lock = qdisc_root_lock(qdisc);

		spin_lock(root_lock);
		qdisc_enqueue_root(skb, qdisc);
		spin_unlock(root_lock);
	}

out_unlock:
	rcu_read_unlock_bh();
}