/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* Default mapping in classifier to work with default queue setup:
 * 802.1d/802.1p user priority (0-7) to access category index, where
 * 0 is AC_VO (voice), 1 is AC_VI (video), 2 is AC_BE (best effort)
 * and 3 is AC_BK (background).
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* Given a data frame determine the 802.1p/1d tag to use. */
static unsigned int classify_1d(struct sk_buff *skb)
{
	unsigned int dscp;

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.  This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		dscp = ip_hdr(skb)->tos & 0xfc;
		break;

	default:
		return 0;
	}

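	/* Only use the top three bits (the IP precedence) as the 802.1d
	 * tag, and only when the remaining DSCP bits are clear; any
	 * other DSCP value falls back to best effort (0).
	 */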
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}


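/* Downgrade the requested access category one step (VO -> VI -> BE -> BK)
 * when the current one may not be used, e.g. because the AP mandates
 * admission control for it.  Returns 0 on success or -1 if the priority
 * cannot be downgraded any further.
 */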
static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 2; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}


/* Indicate which queue to use. */
static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data(hdr->frame_control)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return 0;
	}

	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb);

	/* If we are a client, verify that admission control (ACM) is not
	 * mandated for this AC; if it is, keep downgrading until we find
	 * an AC we may use. */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* The old code would drop the packet in this
			 * case.
			 */
			return 0;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}

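/* Select the outgoing queue for a frame: classify it onto an access
 * category, fill in the QoS control field for QoS data frames, and
 * switch to the station's per-TID aggregation queue when an A-MPDU
 * session is active for the destination.
 */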
u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct sta_info *sta;
	u16 queue;
	u8 tid;

	queue = classify80211(skb, dev);
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

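	/* Requeued frames keep their existing QoS control field; just
	 * pick the queue again, preferring the station's active A-MPDU
	 * queue for this TID when there is one.
	 */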
	if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (sta) {
			struct ieee80211_hw *hw = &local->hw;
			int ampdu_queue = sta->tid_to_tx_q[tid];

			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
			    test_bit(ampdu_queue, local->queue_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}
		rcu_read_unlock();

		return queue;
	}

	/* Now we know the 1d priority, fill in the QoS header if
	 * there is one.
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* the QoS control field is 2 bytes, the second is reserved */
		*p++ = ack_policy | tid;
		*p = 0;

		rcu_read_lock();

		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			struct ieee80211_hw *hw = &local->hw;

			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
			    test_bit(ampdu_queue, local->queue_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}

		rcu_read_unlock();
	}

	return queue;
}

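/* Reserve a hardware A-MPDU queue from the pool for the given station/TID.
 * Returns 0 on success, -EPERM if the hardware has no aggregation queues,
 * or -EAGAIN if all of them are currently in use.
 */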
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
		if (!test_and_set_bit(i, local->queue_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* If there are already pending packets on this
			 * TID, we first need to drain them on the
			 * previous queue, since HT requires strict
			 * in-order delivery. */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit()) {
				DECLARE_MAC_BUF(mac);
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					local->queue_pool[0]);
			}
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}

/* The caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock. */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	int agg_queue = sta->tid_to_tx_q[tid];
	struct ieee80211_hw *hw = &local->hw;

	/* return the qdisc to the pool */
	clear_bit(agg_queue, local->queue_pool);
	sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);

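	/* Frames still sitting on the aggregation queue are either pushed
	 * back through the classifier onto their normal queues, or simply
	 * dropped by resetting the qdisc.
	 */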
	if (requeue) {
		ieee80211_requeue(local, agg_queue);
	} else {
		struct netdev_queue *txq;
		spinlock_t *root_lock;

		txq = netdev_get_tx_queue(local->mdev, agg_queue);
		root_lock = qdisc_root_lock(txq->qdisc);

		spin_lock_bh(root_lock);
		qdisc_reset(txq->qdisc);
		spin_unlock_bh(root_lock);
	}
}

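/* Drain all frames pending on the given queue's qdisc and push them back
 * through ieee80211_select_queue() so they end up on the queue they now
 * belong to, e.g. after an aggregation queue has been torn down.
 */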
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
	struct sk_buff_head list;
	spinlock_t *root_lock;
	struct Qdisc *qdisc;
	u32 len;

	rcu_read_lock_bh();

	qdisc = rcu_dereference(txq->qdisc);
	if (!qdisc || !qdisc->dequeue)
		goto out_unlock;

	skb_queue_head_init(&list);

	root_lock = qdisc_root_lock(qdisc);
	spin_lock(root_lock);
	for (len = qdisc->q.qlen; len > 0; len--) {
		struct sk_buff *skb = qdisc->dequeue(qdisc);

		if (skb)
			__skb_queue_tail(&list, skb);
	}
	spin_unlock(root_lock);

	for (len = list.qlen; len > 0; len--) {
		struct sk_buff *skb = __skb_dequeue(&list);
		u16 new_queue;

		BUG_ON(!skb);
		new_queue = ieee80211_select_queue(local->mdev, skb);
		skb_set_queue_mapping(skb, new_queue);

		txq = netdev_get_tx_queue(local->mdev, new_queue);

		qdisc = rcu_dereference(txq->qdisc);
		root_lock = qdisc_root_lock(qdisc);

		spin_lock(root_lock);
		qdisc_enqueue_root(skb, qdisc);
		spin_unlock(root_lock);
	}

out_unlock:
	rcu_read_unlock_bh();
}