/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * A new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
	       sizeof(struct iwl_mvm_add_sta_cmd) :
	       sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

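/*
 * Find a free sta_id in the firmware station table; returns
 * IWL_MVM_STATION_COUNT if the table is full.
 */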
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0 - reserve it */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

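/*
 * Timer callback for RX BA session inactivity: if frames were received
 * during the last two timeout periods, re-arm the timer; otherwise tear
 * the BA session down through mac80211.
 */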
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

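/* Allocate and enable per-AC HW queues for a new TDLS station (pre-DQA) */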
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

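/* Return a bitmap of the TIDs on this queue that have an open aggregation */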
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free - it DOESN'T delete a BA
 * agreement, and it doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap this queue from the STA's TIDs */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	spin_lock_bh(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock_bh(&mvm->queue_info_lock);

	return disable_agg_tids;
}

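/*
 * Choose an already-allocated DATA queue of this STA to share for a new
 * stream of access category @ac, per the priority list documented below.
 * For example, a STA with queues only for AC_VI and AC_BK that opens a
 * new AC_VO stream will land on the AC_VI queue (rule 3a).
 */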
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try to take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE, so the only AC lower than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if it exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if it exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure the queue we found (or didn't) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	/*
	 * If the AC is lower than the current one - the FIFO needs to be
	 * redirected to the lowest one of the streams in the queue. Check if
	 * this is needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK has
	 * value 3 and VO has value 0. To check if AC X is lower than AC Y,
	 * we need to check if the numerical value of X is LARGER than that
	 * of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

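/*
 * Allocate a TXQ for a given STA/TID pair: prefer the STA's reserved
 * queue, then any free MGMT/DATA queue (re-purposing an inactive one if
 * needed), and as a last resort share an existing DATA queue.
 */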
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.action = SCD_CFG_DISABLE_QUEUE,
		};
		u8 txq_curr_ac;

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		spin_lock_bh(&mvm->queue_info_lock);
		txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
		cmd.tid = mvm->queue_info[queue].txq_tid;
		spin_unlock_bh(&mvm->queue_info_lock);

		/* Disable the queue */
		if (disable_agg_tids)
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}

		/* If TXQ is allocated to another STA, update removal in FW */
		if (cmd.sta_id != mvmsta->sta_id)
			iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

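/*
 * Update the firmware about the TID that now "owns" a queue, after the
 * previous owner TID has been removed from it.
 */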
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	s8 sta_id;
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

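/*
 * Turn a formerly-shared queue back into a single-TID queue: redirect it
 * to the remaining TID's AC, and re-enable aggregation if it was active.
 */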
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

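/*
 * Worker handling deferred streams: un-shares or re-owns queues that need
 * reconfiguration, then allocates queues for STA/TID pairs with deferred
 * TX traffic.
 */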
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

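/*
 * Reserve a DATA queue for a new STA up front, so that adding the STA
 * cannot fail later due to a queue shortage; a non-TDLS station on a
 * station interface prefers the dedicated BSS-client queue.
 */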
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (or allocated) */
	mvm->queue_info[mvm_sta->reserved_queue].status =
		IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		cfg.tid = i;
		cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
		cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
				 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-mapping sta %d tid %d to queue %d\n",
				    mvm_sta->sta_id, i, txq_id);

		iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
				   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
				   &cfg, wdg_timeout);

		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
	}

	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * and then they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity check only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

1388void iwl_mvm_sta_drained_wk(struct work_struct *wk)
1389{
1390 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
1391 u8 sta_id;
1392
1393 /*
1394 * The mutex is needed because of the SYNC cmd, but not only: if the
1395 * work would run concurrently with iwl_mvm_rm_sta, it would run before
1396 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
1397 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
1398 * that later.
1399 */
1400 mutex_lock(&mvm->mutex);
1401
1402 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
1403 int ret;
1404 struct ieee80211_sta *sta =
1405 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1406 lockdep_is_held(&mvm->mutex));
1407
Johannes Berg1ddbbb02013-12-04 22:39:17 +01001408 /*
1409 * This station is in use or RCU-removed; the latter happens in
1410 * managed mode, where mac80211 removes the station before we
1411 * can remove it from firmware (we can only do that after the
1412 * MAC is marked unassociated), and possibly while the deauth
1413 * frame to disconnect from the AP is still queued. Then, the
1414 * station pointer is -ENOENT when the last skb is reclaimed.
1415 */
1416 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001417 continue;
1418
1419 if (PTR_ERR(sta) == -EINVAL) {
1420 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
1421 sta_id);
1422 continue;
1423 }
1424
1425 if (!sta) {
1426 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
1427 sta_id);
1428 continue;
1429 }
1430
1431 WARN_ON(PTR_ERR(sta) != -EBUSY);
1432		/* This station was removed and we waited until it got drained,
1433		 * so we can now proceed and remove it.
1434 */
1435 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1436 if (ret) {
1437 IWL_ERR(mvm,
1438 "Couldn't remove sta %d after it was drained\n",
1439 sta_id);
1440 continue;
1441 }
Monam Agarwalc531c772014-03-24 00:05:56 +05301442 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001443 clear_bit(sta_id, mvm->sta_drained);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001444
1445 if (mvm->tfd_drained[sta_id]) {
1446 unsigned long i, msk = mvm->tfd_drained[sta_id];
1447
Emmanuel Grumbacha4ca3ed2015-01-20 17:07:10 +02001448 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
Arik Nemtsov06ecdba2015-10-12 14:47:11 +03001449 iwl_mvm_disable_txq(mvm, i, i,
1450 IWL_MAX_TID_COUNT, 0);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001451
1452 mvm->tfd_drained[sta_id] = 0;
1453 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
1454 sta_id, msk);
1455 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001456 }
1457
1458 mutex_unlock(&mvm->mutex);
1459}
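
/*
 * Illustrative sketch (not part of the driver): the states an
 * mvm->fw_id_to_mac_id[] slot can be in, as decoded by the worker
 * above. The enum and helper are hypothetical; the sentinel values
 * are the ones this file actually uses.
 */
enum iwl_mvm_example_sta_slot {
	STA_SLOT_FREE,		/* NULL - no station in this slot */
	STA_SLOT_IN_USE,	/* valid ieee80211_sta pointer */
	STA_SLOT_INTERNAL,	/* ERR_PTR(-EINVAL) - aux/bcast station */
	STA_SLOT_DRAINING,	/* ERR_PTR(-EBUSY) - removed, frames pending */
	STA_SLOT_RCU_REMOVED,	/* ERR_PTR(-ENOENT) - last skb reclaimed */
};

static enum iwl_mvm_example_sta_slot
iwl_mvm_example_sta_slot_state(struct ieee80211_sta *sta)
{
	if (!sta)
		return STA_SLOT_FREE;
	if (!IS_ERR(sta))
		return STA_SLOT_IN_USE;

	switch (PTR_ERR(sta)) {
	case -EINVAL:
		return STA_SLOT_INTERNAL;
	case -EBUSY:
		return STA_SLOT_DRAINING;
	default:
		return STA_SLOT_RCU_REMOVED;	/* -ENOENT */
	}
}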
1460
Liad Kaufman24afba72015-07-28 18:56:08 +03001461static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1462 struct ieee80211_vif *vif,
1463 struct iwl_mvm_sta *mvm_sta)
1464{
1465 int ac;
1466 int i;
1467
1468 lockdep_assert_held(&mvm->mutex);
1469
1470 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1471 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
1472 continue;
1473
1474 ac = iwl_mvm_tid_to_ac_queue(i);
1475 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1476 vif->hw_queue[ac], i, 0);
1477 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
1478 }
1479}
1480
Johannes Berg8ca151b2013-01-24 14:25:36 +01001481int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1482 struct ieee80211_vif *vif,
1483 struct ieee80211_sta *sta)
1484{
1485 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001486 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001487 int ret;
1488
1489 lockdep_assert_held(&mvm->mutex);
1490
Sara Sharona571f5f2015-12-07 12:50:58 +02001491 if (iwl_mvm_has_new_rx_api(mvm))
1492 kfree(mvm_sta->dup_data);
1493
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001494 if ((vif->type == NL80211_IFTYPE_STATION &&
1495 mvmvif->ap_sta_id == mvm_sta->sta_id) ||
1496	    iwl_mvm_is_dqa_supported(mvm)) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001497 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1498 if (ret)
1499 return ret;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001500 /* flush its queues here since we are freeing mvm_sta */
Luca Coelho5888a402015-10-06 09:54:57 +03001501 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001502 if (ret)
1503 return ret;
1504 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1505 mvm_sta->tfd_queue_msk);
1506 if (ret)
1507 return ret;
1508 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001509
Liad Kaufman24afba72015-07-28 18:56:08 +03001510 /* If DQA is supported - the queues can be disabled now */
Liad Kaufman56214742016-09-22 15:14:08 +03001511 if (iwl_mvm_is_dqa_supported(mvm))
1512 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1513
1514 /* If there is a TXQ still marked as reserved - free it */
1515 if (iwl_mvm_is_dqa_supported(mvm) &&
1516 mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001517 u8 reserved_txq = mvm_sta->reserved_queue;
1518 enum iwl_mvm_queue_status *status;
1519
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001520 /*
1521 * If no traffic has gone through the reserved TXQ - it
1522 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1523 * should be manually marked as free again
1524 */
1525 spin_lock_bh(&mvm->queue_info_lock);
1526 status = &mvm->queue_info[reserved_txq].status;
1527 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1528 (*status != IWL_MVM_QUEUE_FREE),
1529 "sta_id %d reserved txq %d status %d",
1530 mvm_sta->sta_id, reserved_txq, *status)) {
1531 spin_unlock_bh(&mvm->queue_info_lock);
1532 return -EINVAL;
1533 }
1534
1535 *status = IWL_MVM_QUEUE_FREE;
1536 spin_unlock_bh(&mvm->queue_info_lock);
1537 }
1538
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001539 if (vif->type == NL80211_IFTYPE_STATION &&
1540 mvmvif->ap_sta_id == mvm_sta->sta_id) {
1541 /* if associated - we can't remove the AP STA now */
1542 if (vif->bss_conf.assoc)
1543 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001544
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001545 /* unassoc - go ahead - remove the AP STA now */
1546 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
Eliad Peller37577fe2013-12-05 17:19:39 +02001547
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001548 /* clear d0i3_ap_sta_id if no longer relevant */
1549 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
1550 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1551 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001552 }
1553
1554 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001555 * This shouldn't happen - the TDLS channel switch should be canceled
1556 * before the STA is removed.
1557 */
1558 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
1559 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1560 cancel_delayed_work(&mvm->tdls_cs.dwork);
1561 }
1562
1563 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001564 * Make sure that the tx response code sees the station as -EBUSY and
1565 * calls the drain worker.
1566 */
1567 spin_lock_bh(&mvm_sta->lock);
1568 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001569 * There are frames pending on the AC queues for this station.
1570 * We need to wait until all the frames are drained...
1571 */
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001572 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001573 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
1574 ERR_PTR(-EBUSY));
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001575 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001576
1577 /* disable TDLS sta queues on drain complete */
1578 if (sta->tdls) {
1579 mvm->tfd_drained[mvm_sta->sta_id] =
1580 mvm_sta->tfd_queue_msk;
1581 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
1582 mvm_sta->sta_id);
1583 }
1584
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001585 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001586 } else {
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001587 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001588
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001589 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001590 iwl_mvm_tdls_sta_deinit(mvm, sta);
1591
Johannes Berg8ca151b2013-01-24 14:25:36 +01001592 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
Monam Agarwalc531c772014-03-24 00:05:56 +05301593 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001594 }
1595
1596 return ret;
1597}
1598
1599int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1600 struct ieee80211_vif *vif,
1601 u8 sta_id)
1602{
1603 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1604
1605 lockdep_assert_held(&mvm->mutex);
1606
Monam Agarwalc531c772014-03-24 00:05:56 +05301607 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001608 return ret;
1609}
1610
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001611int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1612 struct iwl_mvm_int_sta *sta,
1613 u32 qmask, enum nl80211_iftype iftype)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001614{
1615 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02001616 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001617 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
1618 return -ENOSPC;
1619 }
1620
1621 sta->tfd_queue_msk = qmask;
1622
1623 /* put a non-NULL value so iterating over the stations won't stop */
1624 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1625 return 0;
1626}
1627
Johannes Berg712b24a2014-08-04 14:14:14 +02001628static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
1629 struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001630{
Monam Agarwalc531c772014-03-24 00:05:56 +05301631 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001632 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1633 sta->sta_id = IWL_MVM_STATION_COUNT;
1634}
1635
1636static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1637 struct iwl_mvm_int_sta *sta,
1638 const u8 *addr,
1639 u16 mac_id, u16 color)
1640{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001641 struct iwl_mvm_add_sta_cmd cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001642 int ret;
1643 u32 status;
1644
1645 lockdep_assert_held(&mvm->mutex);
1646
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001647 memset(&cmd, 0, sizeof(cmd));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001648 cmd.sta_id = sta->sta_id;
1649 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1650 color));
1651
1652 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
Liad Kaufmancf0cda12015-09-24 10:44:12 +02001653 cmd.tid_disable_tx = cpu_to_le16(0xffff);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001654
1655 if (addr)
1656 memcpy(cmd.addr, addr, ETH_ALEN);
1657
Sara Sharon854c5702016-01-26 13:17:47 +02001658 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1659 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001660 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001661 if (ret)
1662 return ret;
1663
Sara Sharon837c4da2016-01-07 16:50:45 +02001664 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001665 case ADD_STA_SUCCESS:
1666 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1667 return 0;
1668 default:
1669 ret = -EIO;
1670 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1671 status);
1672 break;
1673 }
1674 return ret;
1675}
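
/*
 * Illustrative sketch (not part of the driver): the synchronous ADD_STA
 * pattern used throughout this file - seed 'status' with ADD_STA_SUCCESS,
 * send the command, then test only the masked status bits, since newer
 * firmware packs extra data (e.g. the BAID) into the same status word.
 * The helper name is hypothetical.
 */
static int iwl_mvm_example_send_add_sta(struct iwl_mvm *mvm,
					struct iwl_mvm_add_sta_cmd *cmd)
{
	u32 status = ADD_STA_SUCCESS;
	int ret;

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  cmd, &status);
	if (ret)
		return ret;

	return (status & IWL_ADD_STA_STATUS_MASK) == ADD_STA_SUCCESS ?
		0 : -EIO;
}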
1676
1677int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1678{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001679 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1680 mvm->cfg->base_params->wd_timeout :
1681 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001682 int ret;
1683
1684 lockdep_assert_held(&mvm->mutex);
1685
Ariej Marjieh7da91b02014-07-07 12:09:40 +03001686 /* Map Aux queue to fifo - needs to happen before adding Aux station */
Liad Kaufman28d07932015-09-01 16:36:25 +03001687 if (!iwl_mvm_is_dqa_supported(mvm))
1688 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1689 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
Ariej Marjieh7da91b02014-07-07 12:09:40 +03001690
1691	/* Allocate the aux station and assign the aux queue to it */
1692 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Eliad Pellerb92e6612014-01-23 17:58:23 +02001693 NL80211_IFTYPE_UNSPECIFIED);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001694 if (ret)
1695 return ret;
1696
Liad Kaufman28d07932015-09-01 16:36:25 +03001697 if (iwl_mvm_is_dqa_supported(mvm)) {
1698 struct iwl_trans_txq_scd_cfg cfg = {
1699 .fifo = IWL_MVM_TX_FIFO_MCAST,
1700 .sta_id = mvm->aux_sta.sta_id,
1701 .tid = IWL_MAX_TID_COUNT,
1702 .aggregate = false,
1703 .frame_limit = IWL_FRAME_LIMIT,
1704 };
1705
1706 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1707 wdg_timeout);
1708 }
1709
Johannes Berg8ca151b2013-01-24 14:25:36 +01001710 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1711 MAC_INDEX_AUX, 0);
1712
1713 if (ret)
1714 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1715 return ret;
1716}
1717
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001718int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1719{
1720 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1721
1722 lockdep_assert_held(&mvm->mutex);
1723 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1724 mvmvif->id, 0);
1725}
1726
1727int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1728{
1729 int ret;
1730
1731 lockdep_assert_held(&mvm->mutex);
1732
1733 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1734 if (ret)
1735 IWL_WARN(mvm, "Failed sending remove station\n");
1736
1737 return ret;
1738}
1739
1740void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1741{
1742 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1743}
1744
Johannes Berg712b24a2014-08-04 14:14:14 +02001745void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1746{
1747 lockdep_assert_held(&mvm->mutex);
1748
1749 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1750}
1751
Johannes Berg8ca151b2013-01-24 14:25:36 +01001752/*
1753 * Send the add station command for the vif's broadcast station.
1754 * Assumes that the station was already allocated.
1755 *
1756 * @mvm: the mvm component
1757 * @vif: the interface to which the broadcast station is added
1759 */
Johannes Berg013290a2014-08-04 13:38:48 +02001760int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001761{
1762 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001763 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001764 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001765 const u8 *baddr = _baddr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001766
1767 lockdep_assert_held(&mvm->mutex);
1768
Liad Kaufmande24f632015-08-04 15:19:18 +03001769 if (iwl_mvm_is_dqa_supported(mvm)) {
1770 struct iwl_trans_txq_scd_cfg cfg = {
1771 .fifo = IWL_MVM_TX_FIFO_VO,
1772 .sta_id = mvmvif->bcast_sta.sta_id,
1773 .tid = IWL_MAX_TID_COUNT,
1774 .aggregate = false,
1775 .frame_limit = IWL_FRAME_LIMIT,
1776 };
1777 unsigned int wdg_timeout =
1778 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1779 int queue;
1780
1781 if ((vif->type == NL80211_IFTYPE_AP) &&
1782 (mvmvif->bcast_sta.tfd_queue_msk &
1783 BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
1784 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
Liad Kaufman4c965132015-08-09 19:26:56 +03001785 else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
1786 (mvmvif->bcast_sta.tfd_queue_msk &
1787 BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
1788 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
Liad Kaufmande24f632015-08-04 15:19:18 +03001789 else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
1790 return -EINVAL;
1791
1792 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
1793 wdg_timeout);
1794 }
1795
Johannes Berg5023d962013-07-31 14:07:43 +02001796 if (vif->type == NL80211_IFTYPE_ADHOC)
1797 baddr = vif->bss_conf.bssid;
1798
Johannes Berg8ca151b2013-01-24 14:25:36 +01001799 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
1800 return -ENOSPC;
1801
1802 return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1803 mvmvif->id, mvmvif->color);
1804}
1805
1806/* Send the FW a request to remove the station from its internal data
1807 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02001808int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001809{
Johannes Berg013290a2014-08-04 13:38:48 +02001810 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001811 int ret;
1812
1813 lockdep_assert_held(&mvm->mutex);
1814
Johannes Berg013290a2014-08-04 13:38:48 +02001815 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001816 if (ret)
1817 IWL_WARN(mvm, "Failed sending remove station\n");
1818 return ret;
1819}
1820
Johannes Berg013290a2014-08-04 13:38:48 +02001821int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1822{
1823 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Liad Kaufmande24f632015-08-04 15:19:18 +03001824 u32 qmask = 0;
Johannes Berg013290a2014-08-04 13:38:48 +02001825
1826 lockdep_assert_held(&mvm->mutex);
1827
Liad Kaufmande24f632015-08-04 15:19:18 +03001828 if (!iwl_mvm_is_dqa_supported(mvm))
1829 qmask = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001830
Liad Kaufmande24f632015-08-04 15:19:18 +03001831 if (vif->type == NL80211_IFTYPE_AP) {
1832 /*
1833 * The firmware defines the TFD queue mask to only be relevant
1834 * for *unicast* queues, so the multicast (CAB) queue shouldn't
1835 * be included.
1836 */
Johannes Berg013290a2014-08-04 13:38:48 +02001837 qmask &= ~BIT(vif->cab_queue);
1838
Liad Kaufmande24f632015-08-04 15:19:18 +03001839 if (iwl_mvm_is_dqa_supported(mvm))
1840 qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
Liad Kaufman4c965132015-08-09 19:26:56 +03001841 } else if (iwl_mvm_is_dqa_supported(mvm) &&
1842 vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1843 qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
Liad Kaufmande24f632015-08-04 15:19:18 +03001844 }
1845
Johannes Berg013290a2014-08-04 13:38:48 +02001846 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
1847 ieee80211_vif_type_p2p(vif));
1848}
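
/*
 * Illustrative worked example (not part of the driver): for a non-DQA AP
 * vif with hypothetical unicast mac queues 0-3 and CAB queue 8, the
 * broadcast station's queue mask keeps only the unicast queues, since
 * the fw treats the TFD queue mask as unicast-only.
 */
static u32 iwl_mvm_example_bcast_qmask(void)
{
	u8 cab_queue = 8;	/* hypothetical CAB queue number */
	u32 qmask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(cab_queue);

	qmask &= ~BIT(cab_queue);	/* 0x10f -> 0x00f */
	return qmask;
}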
1849
Johannes Berg8ca151b2013-01-24 14:25:36 +01001850/* Allocate a new station entry for the broadcast station of the given vif,
1851 * and send it to the FW.
1852 * Note that each P2P mac should have its own broadcast station.
1853 *
1854 * @mvm: the mvm component
1855 * @vif: the interface to which the broadcast station is added
1856 */
Johannes Berg013290a2014-08-04 13:38:48 +02001857int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001858{
1859 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001860 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001861 int ret;
1862
1863 lockdep_assert_held(&mvm->mutex);
1864
Johannes Berg013290a2014-08-04 13:38:48 +02001865 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001866 if (ret)
1867 return ret;
1868
Johannes Berg013290a2014-08-04 13:38:48 +02001869 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001870
1871 if (ret)
1872 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02001873
Johannes Berg8ca151b2013-01-24 14:25:36 +01001874 return ret;
1875}
1876
Johannes Berg013290a2014-08-04 13:38:48 +02001877void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1878{
1879 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1880
1881 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
1882}
1883
Johannes Berg8ca151b2013-01-24 14:25:36 +01001884/*
1885 * Send the FW a request to remove the station from its internal data
1886 * structures, and in addition remove it from the local data structures.
1887 */
Johannes Berg013290a2014-08-04 13:38:48 +02001888int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001889{
1890 int ret;
1891
1892 lockdep_assert_held(&mvm->mutex);
1893
Johannes Berg013290a2014-08-04 13:38:48 +02001894 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001895
Johannes Berg013290a2014-08-04 13:38:48 +02001896 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1897
Johannes Berg8ca151b2013-01-24 14:25:36 +01001898 return ret;
1899}
1900
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03001901#define IWL_MAX_RX_BA_SESSIONS 16
1902
Sara Sharonb915c102016-03-23 16:32:02 +02001903static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02001904{
Sara Sharonb915c102016-03-23 16:32:02 +02001905 struct iwl_mvm_delba_notif notif = {
1906 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
1907 .metadata.sync = 1,
1908 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02001909 };
Sara Sharonb915c102016-03-23 16:32:02 +02001910 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
1911};
Sara Sharon10b2b202016-03-20 16:23:41 +02001912
Sara Sharonb915c102016-03-23 16:32:02 +02001913static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
1914 struct iwl_mvm_baid_data *data)
1915{
1916 int i;
1917
1918 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
1919
1920 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1921 int j;
1922 struct iwl_mvm_reorder_buffer *reorder_buf =
1923 &data->reorder_buf[i];
1924
Sara Sharon06904052016-02-28 20:28:17 +02001925 spin_lock_bh(&reorder_buf->lock);
1926 if (likely(!reorder_buf->num_stored)) {
1927 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02001928 continue;
Sara Sharon06904052016-02-28 20:28:17 +02001929 }
Sara Sharonb915c102016-03-23 16:32:02 +02001930
1931 /*
1932 * This shouldn't happen in regular DELBA since the internal
1933 * delBA notification should trigger a release of all frames in
1934 * the reorder buffer.
1935 */
1936 WARN_ON(1);
1937
1938 for (j = 0; j < reorder_buf->buf_size; j++)
1939 __skb_queue_purge(&reorder_buf->entries[j]);
Sara Sharon06904052016-02-28 20:28:17 +02001940 /*
1941		 * Prevent timer re-arm. This prevents a very far-fetched case
1942		 * where we timed out on the notification. RX frames that were
1943		 * already pending in the RX queue before the notification might
1944		 * get processed between now and the actual deletion, and we
1945		 * would re-arm the timer even though we are deleting the
1946		 * reorder buffer.
1947 */
1948 reorder_buf->removed = true;
1949 spin_unlock_bh(&reorder_buf->lock);
1950 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02001951 }
1952}
1953
1954static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
1955 u32 sta_id,
1956 struct iwl_mvm_baid_data *data,
1957 u16 ssn, u8 buf_size)
1958{
1959 int i;
1960
1961 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1962 struct iwl_mvm_reorder_buffer *reorder_buf =
1963 &data->reorder_buf[i];
1964 int j;
1965
1966 reorder_buf->num_stored = 0;
1967 reorder_buf->head_sn = ssn;
1968 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02001969 /* rx reorder timer */
1970 reorder_buf->reorder_timer.function =
1971 iwl_mvm_reorder_timer_expired;
1972 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
1973 init_timer(&reorder_buf->reorder_timer);
1974 spin_lock_init(&reorder_buf->lock);
1975 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02001976 reorder_buf->queue = i;
1977 reorder_buf->sta_id = sta_id;
1978 for (j = 0; j < reorder_buf->buf_size; j++)
1979 __skb_queue_head_init(&reorder_buf->entries[j]);
1980 }
Sara Sharon10b2b202016-03-20 16:23:41 +02001981}
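
/*
 * Illustrative sketch (not part of the driver): the BAID data and its
 * per-RX-queue reorder buffers live in a single allocation, with
 * reorder_buf[] as a trailing array indexed by RX queue - this mirrors
 * what iwl_mvm_sta_rx_agg() below allocates before calling
 * iwl_mvm_init_reorder_buffer(). The helper name is hypothetical.
 */
static struct iwl_mvm_baid_data *
iwl_mvm_example_alloc_baid_data(struct iwl_mvm *mvm)
{
	return kzalloc(sizeof(struct iwl_mvm_baid_data) +
		       mvm->trans->num_rx_queues *
		       sizeof(struct iwl_mvm_reorder_buffer),
		       GFP_KERNEL);
}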
1982
Johannes Berg8ca151b2013-01-24 14:25:36 +01001983int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Sara Sharon10b2b202016-03-20 16:23:41 +02001984 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001985{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001986 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001987 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02001988 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001989 int ret;
1990 u32 status;
1991
1992 lockdep_assert_held(&mvm->mutex);
1993
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03001994 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
1995 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
1996 return -ENOSPC;
1997 }
1998
Sara Sharon10b2b202016-03-20 16:23:41 +02001999 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2000 /*
2001 * Allocate here so if allocation fails we can bail out early
2002 * before starting the BA session in the firmware
2003 */
Sara Sharonb915c102016-03-23 16:32:02 +02002004 baid_data = kzalloc(sizeof(*baid_data) +
2005 mvm->trans->num_rx_queues *
2006 sizeof(baid_data->reorder_buf[0]),
2007 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002008 if (!baid_data)
2009 return -ENOMEM;
2010 }
2011
Johannes Berg8ca151b2013-01-24 14:25:36 +01002012 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2013 cmd.sta_id = mvm_sta->sta_id;
2014 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002015 if (start) {
2016 cmd.add_immediate_ba_tid = (u8) tid;
2017 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Sara Sharon854c5702016-01-26 13:17:47 +02002018 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002019 } else {
2020 cmd.remove_immediate_ba_tid = (u8) tid;
2021 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002022 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2023 STA_MODIFY_REMOVE_BA_TID;
2024
2025 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002026 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2027 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002028 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002029 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002030 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002031
Sara Sharon837c4da2016-01-07 16:50:45 +02002032 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002033 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002034 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2035 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002036 break;
2037 case ADD_STA_IMMEDIATE_BA_FAILURE:
2038 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2039 ret = -ENOSPC;
2040 break;
2041 default:
2042 ret = -EIO;
2043 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2044 start ? "start" : "stopp", status);
2045 break;
2046 }
2047
Sara Sharon10b2b202016-03-20 16:23:41 +02002048 if (ret)
2049 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002050
Sara Sharon10b2b202016-03-20 16:23:41 +02002051 if (start) {
2052 u8 baid;
2053
2054 mvm->rx_ba_sessions++;
2055
2056 if (!iwl_mvm_has_new_rx_api(mvm))
2057 return 0;
2058
2059 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2060 ret = -EINVAL;
2061 goto out_free;
2062 }
2063 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2064 IWL_ADD_STA_BAID_SHIFT);
2065 baid_data->baid = baid;
2066 baid_data->timeout = timeout;
2067 baid_data->last_rx = jiffies;
Wei Yongjun72c240f2016-07-12 11:40:57 +00002068 setup_timer(&baid_data->session_timer,
2069 iwl_mvm_rx_agg_session_expired,
2070 (unsigned long)&mvm->baid_map[baid]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002071 baid_data->mvm = mvm;
2072 baid_data->tid = tid;
2073 baid_data->sta_id = mvm_sta->sta_id;
2074
2075 mvm_sta->tid_to_baid[tid] = baid;
2076 if (timeout)
2077 mod_timer(&baid_data->session_timer,
2078 TU_TO_EXP_TIME(timeout * 2));
2079
Sara Sharonb915c102016-03-23 16:32:02 +02002080 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2081 baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002082 /*
2083		 * protect the BA data with RCU to cover the case where our
2084		 * internal RX sync mechanism times out (not that it's
2085		 * supposed to happen) and we free the session data while
2086		 * RX is still being processed in parallel
2087 */
Sara Sharon35263a02016-06-21 12:12:10 +03002088 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2089 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002090 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2091 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002092 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002093 u8 baid = mvm_sta->tid_to_baid[tid];
2094
Sara Sharon60dec522016-06-21 14:14:08 +03002095 if (mvm->rx_ba_sessions > 0)
2096 /* check that restart flow didn't zero the counter */
2097 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002098 if (!iwl_mvm_has_new_rx_api(mvm))
2099 return 0;
2100
2101 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2102 return -EINVAL;
2103
2104 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2105 if (WARN_ON(!baid_data))
2106 return -EINVAL;
2107
2108 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002109 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002110 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002111 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2112 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002113 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002114 }
2115 return 0;
2116
2117out_free:
2118 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002119 return ret;
2120}
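
/*
 * Illustrative sketch (not part of the driver): on firmware with the new
 * RX API, the ADD_STA response packs the allocated BAID into the status
 * word alongside the plain status bits, as iwl_mvm_sta_rx_agg() above
 * decodes it. The helper name is hypothetical.
 */
static int iwl_mvm_example_status_to_baid(u32 status)
{
	if (!(status & IWL_ADD_STA_BAID_VALID_MASK))
		return -EINVAL;

	return (int)((status & IWL_ADD_STA_BAID_MASK) >>
		     IWL_ADD_STA_BAID_SHIFT);
}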
2121
Liad Kaufman9794c642015-08-19 17:34:28 +03002122int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2123 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002124{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002125 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002126 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002127 int ret;
2128 u32 status;
2129
2130 lockdep_assert_held(&mvm->mutex);
2131
2132 if (start) {
2133 mvm_sta->tfd_queue_msk |= BIT(queue);
2134 mvm_sta->tid_disable_agg &= ~BIT(tid);
2135 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002136 /* In DQA-mode the queue isn't removed on agg termination */
2137 if (!iwl_mvm_is_dqa_supported(mvm))
2138 mvm_sta->tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002139 mvm_sta->tid_disable_agg |= BIT(tid);
2140 }
2141
2142 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2143 cmd.sta_id = mvm_sta->sta_id;
2144 cmd.add_modify = STA_MODE_MODIFY;
2145 cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
2146 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2147 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2148
2149 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002150 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2151 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002152 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002153 if (ret)
2154 return ret;
2155
Sara Sharon837c4da2016-01-07 16:50:45 +02002156 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002157 case ADD_STA_SUCCESS:
2158 break;
2159 default:
2160 ret = -EIO;
2161 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2162 start ? "start" : "stopp", status);
2163 break;
2164 }
2165
2166 return ret;
2167}
2168
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002169const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002170 IEEE80211_AC_BE,
2171 IEEE80211_AC_BK,
2172 IEEE80211_AC_BK,
2173 IEEE80211_AC_BE,
2174 IEEE80211_AC_VI,
2175 IEEE80211_AC_VI,
2176 IEEE80211_AC_VO,
2177 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002178 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002179};
2180
Johannes Berg3e56ead2013-02-15 22:23:18 +01002181static const u8 tid_to_ucode_ac[] = {
2182 AC_BE,
2183 AC_BK,
2184 AC_BK,
2185 AC_BE,
2186 AC_VI,
2187 AC_VI,
2188 AC_VO,
2189 AC_VO,
2190};
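
/*
 * Illustrative sketch (not part of the driver): how the two tables above
 * combine - a TID (< IWL_MAX_TID_COUNT) is first mapped to a mac80211 AC,
 * and the AC then selects the TX FIFO (e.g. TID 5 -> IEEE80211_AC_VI),
 * exactly as iwl_mvm_sta_tx_agg_oper() does below. The helper name is
 * hypothetical.
 */
static u8 iwl_mvm_example_tid_to_fifo(u16 tid)
{
	return iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
}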
2191
Johannes Berg8ca151b2013-01-24 14:25:36 +01002192int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2193 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2194{
Johannes Berg5b577a92013-11-14 18:20:04 +01002195 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002196 struct iwl_mvm_tid_data *tid_data;
2197 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002198 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002199
2200 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2201 return -EINVAL;
2202
2203 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2204 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
2205 mvmsta->tid_data[tid].state);
2206 return -ENXIO;
2207 }
2208
2209 lockdep_assert_held(&mvm->mutex);
2210
Arik Nemtsovb2492502014-03-13 12:21:50 +02002211 spin_lock_bh(&mvmsta->lock);
2212
2213 /* possible race condition - we entered D0i3 while starting agg */
2214 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2215 spin_unlock_bh(&mvmsta->lock);
2216 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2217 return -EIO;
2218 }
2219
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002220 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002221
Liad Kaufmancf961e12015-08-13 19:16:08 +03002222 /*
2223 * Note the possible cases:
2224 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2225 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2226 * one and mark it as reserved
2227 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
2228 * non-DQA mode, since the TXQ hasn't yet been allocated
2229 */
2230 txq_id = mvmsta->tid_data[tid].txq_id;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002231 if (iwl_mvm_is_dqa_supported(mvm) &&
2232 unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
2233 ret = -ENXIO;
2234 IWL_DEBUG_TX_QUEUES(mvm,
2235 "Can't start tid %d agg on shared queue!\n",
2236 tid);
2237 goto release_locks;
2238 } else if (!iwl_mvm_is_dqa_supported(mvm) ||
Liad Kaufmancf961e12015-08-13 19:16:08 +03002239 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002240 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2241 mvm->first_agg_queue,
Liad Kaufmancf961e12015-08-13 19:16:08 +03002242 mvm->last_agg_queue);
2243 if (txq_id < 0) {
2244 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002245 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2246 goto release_locks;
2247 }
2248
2249 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2250 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002251 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002252
2253 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002254
Liad Kaufmancf961e12015-08-13 19:16:08 +03002255 IWL_DEBUG_TX_QUEUES(mvm,
2256 "AGG for tid %d will be on queue #%d\n",
2257 tid, txq_id);
2258
Johannes Berg8ca151b2013-01-24 14:25:36 +01002259 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002260 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002261 tid_data->txq_id = txq_id;
2262 *ssn = tid_data->ssn;
2263
2264 IWL_DEBUG_TX_QUEUES(mvm,
2265 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2266 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2267 tid_data->next_reclaimed);
2268
2269 if (tid_data->ssn == tid_data->next_reclaimed) {
2270 tid_data->state = IWL_AGG_STARTING;
2271 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2272 } else {
2273 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2274 }
2275
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002276 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002277 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002278
2279release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002280 spin_unlock(&mvm->queue_info_lock);
2281out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002282 spin_unlock_bh(&mvmsta->lock);
2283
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002284 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002285}
2286
2287int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002288 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2289 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002290{
Johannes Berg5b577a92013-11-14 18:20:04 +01002291 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002292 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002293 unsigned int wdg_timeout =
2294 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002295 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002296 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002297 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002298 u16 ssn;
2299
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002300 struct iwl_trans_txq_scd_cfg cfg = {
2301 .sta_id = mvmsta->sta_id,
2302 .tid = tid,
2303 .frame_limit = buf_size,
2304 .aggregate = true,
2305 };
2306
Eyal Shapiraefed6642014-09-14 15:58:53 +03002307 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2308 != IWL_MAX_TID_COUNT);
2309
Johannes Berg8ca151b2013-01-24 14:25:36 +01002310 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2311
2312 spin_lock_bh(&mvmsta->lock);
2313 ssn = tid_data->ssn;
2314 queue = tid_data->txq_id;
2315 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002316 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002317 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002318 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002319 spin_unlock_bh(&mvmsta->lock);
2320
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002321 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01002322
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002323 spin_lock_bh(&mvm->queue_info_lock);
2324 queue_status = mvm->queue_info[queue].status;
2325 spin_unlock_bh(&mvm->queue_info_lock);
2326
Liad Kaufmancf961e12015-08-13 19:16:08 +03002327 /* In DQA mode, the existing queue might need to be reconfigured */
2328 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002329 /* Maybe there is no need to even alloc a queue... */
2330 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2331 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002332
2333 /*
2334 * Only reconfig the SCD for the queue if the window size has
2335 * changed from current (become smaller)
2336 */
2337 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2338 /*
2339 * If reconfiguring an existing queue, it first must be
2340 * drained
2341 */
2342 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
2343 BIT(queue));
2344 if (ret) {
2345 IWL_ERR(mvm,
2346 "Error draining queue before reconfig\n");
2347 return ret;
2348 }
2349
2350 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2351 mvmsta->sta_id, tid,
2352 buf_size, ssn);
2353 if (ret) {
2354 IWL_ERR(mvm,
2355 "Error reconfiguring TXQ #%d\n", queue);
2356 return ret;
2357 }
2358 }
2359 }
2360
2361 if (alloc_queue)
2362 iwl_mvm_enable_txq(mvm, queue,
2363 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2364 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03002365
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002366 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2367 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2368 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2369 if (ret)
2370 return -EIO;
2371 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002372
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002373 /* No need to mark as reserved */
2374 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002375 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002376 spin_unlock_bh(&mvm->queue_info_lock);
2377
Johannes Berg8ca151b2013-01-24 14:25:36 +01002378 /*
2379 * Even though in theory the peer could have different
2380 * aggregation reorder buffer sizes for different sessions,
2381 * our ucode doesn't allow for that and has a global limit
2382 * for each station. Therefore, use the minimum of all the
2383 * aggregation sessions and our default value.
2384 */
2385 mvmsta->max_agg_bufsize =
2386 min(mvmsta->max_agg_bufsize, buf_size);
2387 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2388
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03002389 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2390 sta->addr, tid);
2391
Eyal Shapira9e680942013-11-09 00:16:16 +02002392 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002393}
2394
2395int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2396 struct ieee80211_sta *sta, u16 tid)
2397{
Johannes Berg5b577a92013-11-14 18:20:04 +01002398 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002399 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2400 u16 txq_id;
2401 int err;
2402
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02002403 /*
2404 * If mac80211 is cleaning its state, then say that we finished since
2405 * our state has been cleared anyway.
2406 */
2407 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2408 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2409 return 0;
2410 }
2411
Johannes Berg8ca151b2013-01-24 14:25:36 +01002412 spin_lock_bh(&mvmsta->lock);
2413
2414 txq_id = tid_data->txq_id;
2415
2416 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2417 mvmsta->sta_id, tid, txq_id, tid_data->state);
2418
Eyal Shapiraefed6642014-09-14 15:58:53 +03002419 mvmsta->agg_tids &= ~BIT(tid);
2420
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002421 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002422 /*
2423	 * The TXQ is marked as reserved only if no traffic came through yet.
2424	 * This means no traffic has been sent on this TID (agg'd or not), so
2425	 * we no longer have use for the queue. Since it hasn't even been
2426	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
2427	 * free.
2428 */
2429 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2430 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002431
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002432 spin_unlock_bh(&mvm->queue_info_lock);
2433
Johannes Berg8ca151b2013-01-24 14:25:36 +01002434 switch (tid_data->state) {
2435 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01002436 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002437
2438 IWL_DEBUG_TX_QUEUES(mvm,
2439 "ssn = %d, next_recl = %d\n",
2440 tid_data->ssn, tid_data->next_reclaimed);
2441
2442 /* There are still packets for this RA / TID in the HW */
2443 if (tid_data->ssn != tid_data->next_reclaimed) {
2444 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
2445 err = 0;
2446 break;
2447 }
2448
2449 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002450 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002451 spin_unlock_bh(&mvmsta->lock);
2452
2453 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2454
2455 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2456
Liad Kaufmancf961e12015-08-13 19:16:08 +03002457 if (!iwl_mvm_is_dqa_supported(mvm)) {
2458 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2459
2460 iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
2461 }
Johannes Bergf7f89e72014-08-05 15:24:44 +02002462 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002463 case IWL_AGG_STARTING:
2464 case IWL_EMPTYING_HW_QUEUE_ADDBA:
2465 /*
2466 * The agg session has been stopped before it was set up. This
2467 * can happen when the AddBA timer times out for example.
2468 */
2469
2470 /* No barriers since we are under mutex */
2471 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002472
2473 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2474 tid_data->state = IWL_AGG_OFF;
2475 err = 0;
2476 break;
2477 default:
2478 IWL_ERR(mvm,
2479 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2480 mvmsta->sta_id, tid, tid_data->state);
2481 IWL_ERR(mvm,
2482 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2483 err = -EINVAL;
2484 }
2485
2486 spin_unlock_bh(&mvmsta->lock);
2487
2488 return err;
2489}
2490
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002491int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2492 struct ieee80211_sta *sta, u16 tid)
2493{
Johannes Berg5b577a92013-11-14 18:20:04 +01002494 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002495 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2496 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02002497 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002498
2499 /*
2500 * First set the agg state to OFF to avoid calling
2501 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2502 */
2503 spin_lock_bh(&mvmsta->lock);
2504 txq_id = tid_data->txq_id;
2505 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2506 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02002507 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002508 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002509 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002510 spin_unlock_bh(&mvmsta->lock);
2511
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002512 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002513 /*
2514	 * The TXQ is marked as reserved only if no traffic came through yet.
2515	 * This means no traffic has been sent on this TID (agg'd or not), so
2516	 * we no longer have use for the queue. Since it hasn't even been
2517	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
2518	 * free.
2519 */
2520 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2521 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002522 spin_unlock_bh(&mvm->queue_info_lock);
2523
Johannes Bergb6658ff2013-07-24 13:55:51 +02002524 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002525 iwl_mvm_drain_sta(mvm, mvmsta, true);
Luca Coelho5888a402015-10-06 09:54:57 +03002526 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
Johannes Bergb6658ff2013-07-24 13:55:51 +02002527 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002528 iwl_trans_wait_tx_queue_empty(mvm->trans,
2529 mvmsta->tfd_queue_msk);
2530 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002531
Johannes Bergf7f89e72014-08-05 15:24:44 +02002532 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2533
Liad Kaufmancf961e12015-08-13 19:16:08 +03002534 if (!iwl_mvm_is_dqa_supported(mvm)) {
2535 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2536
2537 iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
2538 tid, 0);
2539 }
Johannes Bergb6658ff2013-07-24 13:55:51 +02002540 }
2541
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002542 return 0;
2543}
2544
Johannes Berg8ca151b2013-01-24 14:25:36 +01002545static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2546{
Johannes Berg2dc2a152015-06-16 17:09:18 +02002547 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002548
2549 lockdep_assert_held(&mvm->mutex);
2550
Johannes Berg2dc2a152015-06-16 17:09:18 +02002551 /* Pick the unused key offset with the highest 'deleted'
2552 * counter. Every time a key is deleted, all the counters
2553 * are incremented and the one that was just deleted is
2554 * reset to zero. Thus, the highest counter is the one
2555 * that was deleted longest ago. Pick that one.
2556 */
2557 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2558 if (test_bit(i, mvm->fw_key_table))
2559 continue;
2560 if (mvm->fw_key_deleted[i] > max) {
2561 max = mvm->fw_key_deleted[i];
2562 max_offs = i;
2563 }
2564 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002565
Johannes Berg2dc2a152015-06-16 17:09:18 +02002566 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002567 return STA_KEY_IDX_INVALID;
2568
Johannes Berg2dc2a152015-06-16 17:09:18 +02002569 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002570}
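
/*
 * Illustrative worked example (not part of the driver): with hypothetical
 * counters fw_key_deleted = {3, 0, 7} and offsets 0 and 2 unused, the
 * logic above picks offset 2, whose key was deleted longest ago.
 */
static int iwl_mvm_example_pick_key_offset(void)
{
	int fw_key_deleted[3] = { 3, 0, 7 };	/* hypothetical counters */
	bool in_use[3] = { false, true, false };
	int i, max = -1, max_offs = -1;

	for (i = 0; i < 3; i++) {
		if (in_use[i])
			continue;
		if (fw_key_deleted[i] > max) {
			max = fw_key_deleted[i];
			max_offs = i;
		}
	}

	return max_offs;	/* 2 */
}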
2571
Johannes Berg5f7a1842015-12-11 09:36:10 +01002572static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2573 struct ieee80211_vif *vif,
2574 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002575{
Johannes Berg5b530e92014-12-23 16:00:17 +01002576 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002577
Johannes Berg5f7a1842015-12-11 09:36:10 +01002578 if (sta)
2579 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002580
2581 /*
2582 * The device expects GTKs for station interfaces to be
2583 * installed as GTKs for the AP station. If we have no
2584 * station ID, then use AP's station ID.
2585 */
2586 if (vif->type == NL80211_IFTYPE_STATION &&
Avri Altman9513c5e2015-10-19 16:29:11 +02002587 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2588 u8 sta_id = mvmvif->ap_sta_id;
2589
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002590 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2591 lockdep_is_held(&mvm->mutex));
2592
Avri Altman9513c5e2015-10-19 16:29:11 +02002593 /*
2594 * It is possible that the 'sta' parameter is NULL,
2595 * for example when a GTK is removed - the sta_id will then
2596 * be the AP ID, and no station was passed by mac80211.
2597 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002598 if (IS_ERR_OR_NULL(sta))
2599 return NULL;
2600
2601 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02002602 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002603
Johannes Berg5f7a1842015-12-11 09:36:10 +01002604 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002605}
2606
2607static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
2608 struct iwl_mvm_sta *mvm_sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01002609 struct ieee80211_key_conf *keyconf, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002610 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
2611 u8 key_offset)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002612{
Max Stepanov5a258aa2013-04-07 09:11:21 +03002613 struct iwl_mvm_add_sta_key_cmd cmd = {};
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002614 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01002615 int ret;
2616 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002617 u16 keyidx;
2618 int i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002619 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002620
2621 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2622 STA_KEY_FLG_KEYID_MSK;
2623 key_flags = cpu_to_le16(keyidx);
2624 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
2625
2626 switch (keyconf->cipher) {
2627 case WLAN_CIPHER_SUITE_TKIP:
2628 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Max Stepanov5a258aa2013-04-07 09:11:21 +03002629 cmd.tkip_rx_tsc_byte2 = tkip_iv32;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002630 for (i = 0; i < 5; i++)
Max Stepanov5a258aa2013-04-07 09:11:21 +03002631 cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
2632 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002633 break;
2634 case WLAN_CIPHER_SUITE_CCMP:
2635 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Max Stepanov5a258aa2013-04-07 09:11:21 +03002636 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002637 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01002638 case WLAN_CIPHER_SUITE_WEP104:
2639 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05002640 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01002641 case WLAN_CIPHER_SUITE_WEP40:
2642 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
2643 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
2644 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03002645 case WLAN_CIPHER_SUITE_GCMP_256:
2646 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
2647 /* fall through */
2648 case WLAN_CIPHER_SUITE_GCMP:
2649 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
2650 memcpy(cmd.key, keyconf->key, keyconf->keylen);
2651 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002652 default:
Max Stepanove36e5432013-08-27 19:56:13 +03002653 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
2654 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002655 }
2656
Johannes Bergba3943b2014-11-12 23:54:48 +01002657 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002658 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2659
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002660 cmd.key_offset = key_offset;
Max Stepanov5a258aa2013-04-07 09:11:21 +03002661 cmd.key_flags = key_flags;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002662 cmd.sta_id = sta_id;
2663
2664 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002665 if (cmd_flags & CMD_ASYNC)
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002666 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
2667 sizeof(cmd), &cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002668 else
2669 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
2670 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002671
2672 switch (status) {
2673 case ADD_STA_SUCCESS:
2674 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
2675 break;
2676 default:
2677 ret = -EIO;
2678 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
2679 break;
2680 }
2681
2682 return ret;
2683}
2684
2685static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
2686 struct ieee80211_key_conf *keyconf,
2687 u8 sta_id, bool remove_key)
2688{
2689 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
2690
2691 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03002692 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
2693 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
2694 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
2695 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
2696 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
2697 return -EINVAL;
2698
2699 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
2700 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002701 return -EINVAL;
2702
2703 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
2704 igtk_cmd.sta_id = cpu_to_le32(sta_id);
2705
2706 if (remove_key) {
2707 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
2708 } else {
2709 struct ieee80211_key_seq seq;
2710 const u8 *pn;
2711
Ayala Bekeraa950522016-06-01 00:28:09 +03002712 switch (keyconf->cipher) {
2713 case WLAN_CIPHER_SUITE_AES_CMAC:
2714 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
2715 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03002716 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2717 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2718 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
2719 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03002720 default:
2721 return -EINVAL;
2722 }
2723
Ayala Beker8e160ab2016-04-11 11:37:38 +03002724 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
2725 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
2726 igtk_cmd.ctrl_flags |=
2727 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002728 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
2729 pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

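/*
 * Return the MAC address to use for TKIP phase-1 key derivation:
 * the station's own address when one is given, the AP station's
 * address on a client interface, or NULL otherwise.
 */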
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

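/*
 * Program one unicast or multicast key into the firmware. For TKIP,
 * fetch the phase-1 RX key from mac80211 first, since the firmware
 * expects it precomputed; all other ciphers are sent as-is.
 */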
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

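/*
 * Invalidate a key slot in the firmware by sending an ADD_STA_KEY
 * command with the STA_KEY_NOT_VALID flag set.
 */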
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

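/*
 * Install a new key for a station: route IGTKs to the dedicated
 * command, pick a free firmware key offset unless the caller
 * pre-assigned one (HW restart, D3), and upload WEP keys twice so
 * both the unicast and multicast entries point at the same slot.
 */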
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

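/*
 * Remove a station key: clear its offset from the firmware key
 * table, age the per-offset deletion counters, and delete WEP keys
 * from both the unicast and multicast entries.
 */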
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

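/*
 * Push a newly derived TKIP phase-1 key to the firmware (called by
 * mac80211 when iv32 changes); runs in an atomic context, hence RCU
 * and CMD_ASYNC.
 */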
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

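/* Clear the PS flag for a station that woke up, so the firmware
 * resumes transmitting to it.
 */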
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

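/*
 * Tell the firmware how many frames it may release to a sleeping
 * station during a service period (PS-Poll or uAPSD), adjusting the
 * count and the MORE_DATA bit when frames come from aggregation
 * queues.
 */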
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if all
	 * the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

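/* Handle the firmware's end-of-service-period notification by
 * forwarding it to mac80211.
 */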
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

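/* Set or clear the firmware's DISABLE_TX flag for a single station. */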
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

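/* Block or unblock Tx to a station in both mac80211 and the
 * firmware, skipping the update if the state is already correct.
 */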
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

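/* Apply the Tx block/unblock to every station that belongs to the
 * given interface (matched by MAC id and color).
 */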
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

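/* Stop Tx to the AP station while a channel switch is in progress. */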
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}