/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * The new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

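/*
 * Find a free entry in the firmware station table and return its index,
 * skipping any reserved station IDs, or IWL_MVM_STATION_COUNT if the
 * table is full.
 */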
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

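/*
 * Timer callback for an RX BA session: if frames were received within twice
 * the session timeout the timer is simply re-armed, otherwise the BA session
 * is torn down through mac80211.
 */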
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

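/*
 * Allocate a dedicated hardware queue per AC for a TDLS station and enable
 * all of them. Returns -EBUSY if not enough free queues are available.
 */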
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

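/* Return a bitmap of the TIDs mapped to this queue that have an agg open */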
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free - it DOESN'T delete a BA
 * agreement, and it doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap TIDs from this queue and collect those with an agg open */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	spin_lock_bh(&mvm->queue_info_lock);
	/* Unmap MAC queues and TIDs from this queue */
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	spin_unlock_bh(&mvm->queue_info_lock);

	return disable_agg_tids;
}

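/*
 * Pick an existing DATA queue of this station to share for a new TID of the
 * given AC, preferring (in descending order) BE, the same AC, a lower AC and
 * finally any allocated AC. Called with queue_info_lock held.
 */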
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try to take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE, so the only AC lower than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if it exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if it exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure the queue we found (or didn't) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	/*
	 * If the AC is lower than the current one - the FIFO needs to be
	 * redirected to the lowest one of the streams in the queue. Check if
	 * this is needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK has
	 * value 3 and VO has value 0 - so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than that
	 * of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark the queue as shared in the transport if it is shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication of shared
	 * queues there.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

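/*
 * Allocate a TXQ for the given station/TID in DQA mode: try the MGMT queue
 * range for non-QoS frames, then the station's reserved queue, then any free
 * DATA queue, and fall back to sharing an existing queue as a last resort.
 */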
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowed to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.action = SCD_CFG_DISABLE_QUEUE,
		};
		u8 txq_curr_ac;

		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

		spin_lock_bh(&mvm->queue_info_lock);
		txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
		cmd.tid = mvm->queue_info[queue].txq_tid;
		spin_unlock_bh(&mvm->queue_info_lock);

		/* Disable the queue */
		if (disable_agg_tids)
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		iwl_trans_txq_disable(mvm->trans, queue, false);
		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					   &cmd);
		if (ret) {
			IWL_ERR(mvm,
				"Failed to free inactive queue %d (ret=%d)\n",
				queue, ret);

			/* Re-mark the inactive queue as inactive */
			spin_lock_bh(&mvm->queue_info_lock);
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
			spin_unlock_bh(&mvm->queue_info_lock);

			return ret;
		}

		/* If TXQ is allocated to another STA, update removal in FW */
		if (cmd.sta_id != mvmsta->sta_id)
			iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	/*
	 * Mark the queue as shared in the transport if it is shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication of shared
	 * queues there.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

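/*
 * Hand a shared TXQ over to one of the TIDs still mapped to it by sending an
 * SCD_QUEUE_CFG command with the SCD_CFG_UPDATE_QUEUE_TID action.
 */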
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	s8 sta_id;
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

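/*
 * Turn a shared TXQ back into a regular queue once a single TID is left on
 * it: redirect it to that TID's AC and re-enable aggregation if needed.
 */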
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

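/*
 * Transmit the frames deferred for this station/TID while no queue was
 * allocated, allocating a queue first if needed; if allocation fails the
 * deferred frames are freed.
 */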
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

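/*
 * Worker for DQA queue management: checks queues for inactivity, unshares
 * queues marked for reconfiguration, updates TID ownership where needed and
 * transmits any deferred traffic.
 */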
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

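/*
 * Reserve a DATA queue for a new station before any traffic arrives on it;
 * the BSS client queue is preferred for a non-TDLS client station.
 */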
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (or allocated) */
	mvm->queue_info[mvm_sta->reserved_queue].status =
		IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		cfg.tid = i;
		cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
		cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
				 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-mapping sta %d tid %d to queue %d\n",
				    mvm_sta->sta_id, i, txq_id);

		iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
				   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
				   &cfg, wdg_timeout);

		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
	}

	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

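/*
 * Add a station to the firmware: allocate a station ID, set up the
 * driver-side state (queues, TID data, duplicate-detection data) and send
 * the ADD_STA command. On a HW restart the previous queue layout is reused.
 */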
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * and then they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}

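/*
 * Ask the firmware to start or stop draining frames for this station by
 * toggling STA_FLG_DRAIN_FLOW in an ADD_STA (modify) command.
 */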
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

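/*
 * Worker completing the removal of stations that were waiting to be drained:
 * it removes them from the firmware, clears their RCU pointers and disables
 * any (TDLS) queues they still own.
 */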
1374void iwl_mvm_sta_drained_wk(struct work_struct *wk)
1375{
1376 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
1377 u8 sta_id;
1378
1379 /*
1380 * The mutex is needed because of the SYNC cmd, but not only: if the
1381 * work would run concurrently with iwl_mvm_rm_sta, it would run before
1382 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
1383 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
1384 * that later.
1385 */
1386 mutex_lock(&mvm->mutex);
1387
1388 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
1389 int ret;
1390 struct ieee80211_sta *sta =
1391 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1392 lockdep_is_held(&mvm->mutex));
1393
Johannes Berg1ddbbb02013-12-04 22:39:17 +01001394 /*
1395 * This station is in use or RCU-removed; the latter happens in
1396 * managed mode, where mac80211 removes the station before we
1397 * can remove it from firmware (we can only do that after the
1398 * MAC is marked unassociated), and possibly while the deauth
1399 * frame to disconnect from the AP is still queued. Then, the
1400 * station pointer is -ENOENT when the last skb is reclaimed.
1401 */
1402 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001403 continue;
1404
1405 if (PTR_ERR(sta) == -EINVAL) {
1406 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
1407 sta_id);
1408 continue;
1409 }
1410
1411 if (!sta) {
1412 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
1413 sta_id);
1414 continue;
1415 }
1416
1417 WARN_ON(PTR_ERR(sta) != -EBUSY);
1418 /* This station was removed and we waited until it got drained,
1419 * we can now proceed and remove it.
1420 */
1421 ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1422 if (ret) {
1423 IWL_ERR(mvm,
1424 "Couldn't remove sta %d after it was drained\n",
1425 sta_id);
1426 continue;
1427 }
Monam Agarwalc531c772014-03-24 00:05:56 +05301428 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001429 clear_bit(sta_id, mvm->sta_drained);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001430
1431 if (mvm->tfd_drained[sta_id]) {
1432 unsigned long i, msk = mvm->tfd_drained[sta_id];
1433
Emmanuel Grumbacha4ca3ed2015-01-20 17:07:10 +02001434 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
Arik Nemtsov06ecdba2015-10-12 14:47:11 +03001435 iwl_mvm_disable_txq(mvm, i, i,
1436 IWL_MAX_TID_COUNT, 0);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001437
1438 mvm->tfd_drained[sta_id] = 0;
1439 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
1440 sta_id, msk);
1441 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001442 }
1443
1444 mutex_unlock(&mvm->mutex);
1445}
1446
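/*
 * Disable all TX queues attached to this station (DQA mode): walk the
 * per-TID data, disable each allocated TXQ and mark the TID's queue as
 * invalid again.
 */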
Liad Kaufman24afba72015-07-28 18:56:08 +03001447static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1448 struct ieee80211_vif *vif,
1449 struct iwl_mvm_sta *mvm_sta)
1450{
1451 int ac;
1452 int i;
1453
1454 lockdep_assert_held(&mvm->mutex);
1455
1456 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1457 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
1458 continue;
1459
1460 ac = iwl_mvm_tid_to_ac_queue(i);
1461 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1462 vif->hw_queue[ac], i, 0);
1463 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
1464 }
1465}
1466
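/*
 * Station removal, in order: drain the station in the firmware, flush
 * and wait on its TX queues, release the DQA queues (including the
 * reserved one), and only then remove the station itself. If frames are
 * still pending in the transport, the fw_id_to_mac_id entry is set to
 * ERR_PTR(-EBUSY) instead and the actual removal is deferred to
 * iwl_mvm_sta_drained_wk().
 */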
Johannes Berg8ca151b2013-01-24 14:25:36 +01001467int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1468 struct ieee80211_vif *vif,
1469 struct ieee80211_sta *sta)
1470{
1471 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001472 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001473 int ret;
1474
1475 lockdep_assert_held(&mvm->mutex);
1476
Sara Sharona571f5f2015-12-07 12:50:58 +02001477 if (iwl_mvm_has_new_rx_api(mvm))
1478 kfree(mvm_sta->dup_data);
1479
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001480 if ((vif->type == NL80211_IFTYPE_STATION &&
1481 mvmvif->ap_sta_id == mvm_sta->sta_id) ||
1482	    iwl_mvm_is_dqa_supported(mvm)) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001483 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1484 if (ret)
1485 return ret;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001486 /* flush its queues here since we are freeing mvm_sta */
Luca Coelho5888a402015-10-06 09:54:57 +03001487 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001488 if (ret)
1489 return ret;
1490 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1491 mvm_sta->tfd_queue_msk);
1492 if (ret)
1493 return ret;
1494 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001495
Liad Kaufman24afba72015-07-28 18:56:08 +03001496 /* If DQA is supported - the queues can be disabled now */
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001497 if (iwl_mvm_is_dqa_supported(mvm)) {
1498 u8 reserved_txq = mvm_sta->reserved_queue;
1499 enum iwl_mvm_queue_status *status;
1500
Liad Kaufman24afba72015-07-28 18:56:08 +03001501 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1502
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001503 /*
1504 * If no traffic has gone through the reserved TXQ - it
1505 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1506 * should be manually marked as free again
1507 */
1508 spin_lock_bh(&mvm->queue_info_lock);
1509 status = &mvm->queue_info[reserved_txq].status;
1510 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1511 (*status != IWL_MVM_QUEUE_FREE),
1512 "sta_id %d reserved txq %d status %d",
1513 mvm_sta->sta_id, reserved_txq, *status)) {
1514 spin_unlock_bh(&mvm->queue_info_lock);
1515 return -EINVAL;
1516 }
1517
1518 *status = IWL_MVM_QUEUE_FREE;
1519 spin_unlock_bh(&mvm->queue_info_lock);
1520 }
1521
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001522 if (vif->type == NL80211_IFTYPE_STATION &&
1523 mvmvif->ap_sta_id == mvm_sta->sta_id) {
1524 /* if associated - we can't remove the AP STA now */
1525 if (vif->bss_conf.assoc)
1526 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001527
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001528 /* unassoc - go ahead - remove the AP STA now */
1529 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
Eliad Peller37577fe2013-12-05 17:19:39 +02001530
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001531 /* clear d0i3_ap_sta_id if no longer relevant */
1532 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
1533 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1534 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001535 }
1536
1537 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001538 * This shouldn't happen - the TDLS channel switch should be canceled
1539 * before the STA is removed.
1540 */
1541 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
1542 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1543 cancel_delayed_work(&mvm->tdls_cs.dwork);
1544 }
1545
1546 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001547 * Make sure that the tx response code sees the station as -EBUSY and
1548 * calls the drain worker.
1549 */
1550 spin_lock_bh(&mvm_sta->lock);
1551 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001552 * There are frames pending on the AC queues for this station.
1553 * We need to wait until all the frames are drained...
1554 */
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001555 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001556 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
1557 ERR_PTR(-EBUSY));
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001558 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001559
1560 /* disable TDLS sta queues on drain complete */
1561 if (sta->tdls) {
1562 mvm->tfd_drained[mvm_sta->sta_id] =
1563 mvm_sta->tfd_queue_msk;
1564 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
1565 mvm_sta->sta_id);
1566 }
1567
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001568 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001569 } else {
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001570 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001571
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001572 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001573 iwl_mvm_tdls_sta_deinit(mvm, sta);
1574
Johannes Berg8ca151b2013-01-24 14:25:36 +01001575 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
Monam Agarwalc531c772014-03-24 00:05:56 +05301576 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001577 }
1578
1579 return ret;
1580}
1581
1582int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1583 struct ieee80211_vif *vif,
1584 u8 sta_id)
1585{
1586 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1587
1588 lockdep_assert_held(&mvm->mutex);
1589
Monam Agarwalc531c772014-03-24 00:05:56 +05301590 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001591 return ret;
1592}
1593
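/*
 * Internal (driver-only) stations - aux, sniffer and broadcast - are
 * not known to mac80211. Their fw_id_to_mac_id slot is set to
 * ERR_PTR(-EINVAL), which keeps station iteration from stopping at them
 * and is how the drain worker recognizes an internal station.
 */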
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001594int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1595 struct iwl_mvm_int_sta *sta,
1596 u32 qmask, enum nl80211_iftype iftype)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001597{
1598 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02001599 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001600 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
1601 return -ENOSPC;
1602 }
1603
1604 sta->tfd_queue_msk = qmask;
1605
1606 /* put a non-NULL value so iterating over the stations won't stop */
1607 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1608 return 0;
1609}
1610
Johannes Berg712b24a2014-08-04 14:14:14 +02001611static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
1612 struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001613{
Monam Agarwalc531c772014-03-24 00:05:56 +05301614 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001615 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1616 sta->sta_id = IWL_MVM_STATION_COUNT;
1617}
1618
1619static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1620 struct iwl_mvm_int_sta *sta,
1621 const u8 *addr,
1622 u16 mac_id, u16 color)
1623{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001624 struct iwl_mvm_add_sta_cmd cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001625 int ret;
1626 u32 status;
1627
1628 lockdep_assert_held(&mvm->mutex);
1629
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001630 memset(&cmd, 0, sizeof(cmd));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001631 cmd.sta_id = sta->sta_id;
1632 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1633 color));
1634
1635 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
Liad Kaufmancf0cda12015-09-24 10:44:12 +02001636 cmd.tid_disable_tx = cpu_to_le16(0xffff);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001637
1638 if (addr)
1639 memcpy(cmd.addr, addr, ETH_ALEN);
1640
Sara Sharon854c5702016-01-26 13:17:47 +02001641 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1642 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001643 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001644 if (ret)
1645 return ret;
1646
Sara Sharon837c4da2016-01-07 16:50:45 +02001647 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001648 case ADD_STA_SUCCESS:
1649 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1650 return 0;
1651 default:
1652 ret = -EIO;
1653 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1654 status);
1655 break;
1656 }
1657 return ret;
1658}
1659
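/*
 * The aux station owns a single queue (mvm->aux_queue). Note the
 * ordering: without DQA the queue is enabled before the station is
 * added, while with DQA the queue is enabled afterwards, since the
 * scheduler config carries the station's sta_id.
 */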
1660int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1661{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001662 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1663 mvm->cfg->base_params->wd_timeout :
1664 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001665 int ret;
1666
1667 lockdep_assert_held(&mvm->mutex);
1668
Ariej Marjieh7da91b02014-07-07 12:09:40 +03001669 /* Map Aux queue to fifo - needs to happen before adding Aux station */
Liad Kaufman28d07932015-09-01 16:36:25 +03001670 if (!iwl_mvm_is_dqa_supported(mvm))
1671 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1672 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
Ariej Marjieh7da91b02014-07-07 12:09:40 +03001673
1674 /* Allocate aux station and assign to it the aux queue */
1675 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Eliad Pellerb92e6612014-01-23 17:58:23 +02001676 NL80211_IFTYPE_UNSPECIFIED);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001677 if (ret)
1678 return ret;
1679
Liad Kaufman28d07932015-09-01 16:36:25 +03001680 if (iwl_mvm_is_dqa_supported(mvm)) {
1681 struct iwl_trans_txq_scd_cfg cfg = {
1682 .fifo = IWL_MVM_TX_FIFO_MCAST,
1683 .sta_id = mvm->aux_sta.sta_id,
1684 .tid = IWL_MAX_TID_COUNT,
1685 .aggregate = false,
1686 .frame_limit = IWL_FRAME_LIMIT,
1687 };
1688
1689 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1690 wdg_timeout);
1691 }
1692
Johannes Berg8ca151b2013-01-24 14:25:36 +01001693 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1694 MAC_INDEX_AUX, 0);
1695
1696 if (ret)
1697 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1698 return ret;
1699}
1700
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001701int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1702{
1703 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1704
1705 lockdep_assert_held(&mvm->mutex);
1706 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1707 mvmvif->id, 0);
1708}
1709
1710int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1711{
1712 int ret;
1713
1714 lockdep_assert_held(&mvm->mutex);
1715
1716 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1717 if (ret)
1718 IWL_WARN(mvm, "Failed sending remove station\n");
1719
1720 return ret;
1721}
1722
1723void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1724{
1725 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1726}
1727
Johannes Berg712b24a2014-08-04 14:14:14 +02001728void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1729{
1730 lockdep_assert_held(&mvm->mutex);
1731
1732 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1733}
1734
Johannes Berg8ca151b2013-01-24 14:25:36 +01001735/*
1736 * Send the add station command for the vif's broadcast station.
1737 * Assumes that the station was already allocated.
1738 *
1739 * @mvm: the mvm component
1740 * @vif: the interface to which the broadcast station is added
1741 * (the broadcast station itself is taken from the vif's bcast_sta)
1742 */
Johannes Berg013290a2014-08-04 13:38:48 +02001743int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001744{
1745 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001746 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001747 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001748 const u8 *baddr = _baddr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001749
1750 lockdep_assert_held(&mvm->mutex);
1751
Liad Kaufmande24f632015-08-04 15:19:18 +03001752 if (iwl_mvm_is_dqa_supported(mvm)) {
1753 struct iwl_trans_txq_scd_cfg cfg = {
1754 .fifo = IWL_MVM_TX_FIFO_VO,
1755 .sta_id = mvmvif->bcast_sta.sta_id,
1756 .tid = IWL_MAX_TID_COUNT,
1757 .aggregate = false,
1758 .frame_limit = IWL_FRAME_LIMIT,
1759 };
1760 unsigned int wdg_timeout =
1761 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1762 int queue;
1763
1764 if ((vif->type == NL80211_IFTYPE_AP) &&
1765 (mvmvif->bcast_sta.tfd_queue_msk &
1766 BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
1767 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
Liad Kaufman4c965132015-08-09 19:26:56 +03001768 else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
1769 (mvmvif->bcast_sta.tfd_queue_msk &
1770 BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
1771 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
Liad Kaufmande24f632015-08-04 15:19:18 +03001772 else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
1773 return -EINVAL;
1774
1775 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
1776 wdg_timeout);
1777 }
1778
Johannes Berg5023d962013-07-31 14:07:43 +02001779 if (vif->type == NL80211_IFTYPE_ADHOC)
1780 baddr = vif->bss_conf.bssid;
1781
Johannes Berg8ca151b2013-01-24 14:25:36 +01001782 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
1783 return -ENOSPC;
1784
1785 return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1786 mvmvif->id, mvmvif->color);
1787}
1788
1789/* Send the FW a request to remove the station from its internal data
1790 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02001791int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001792{
Johannes Berg013290a2014-08-04 13:38:48 +02001793 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001794 int ret;
1795
1796 lockdep_assert_held(&mvm->mutex);
1797
Johannes Berg013290a2014-08-04 13:38:48 +02001798 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001799 if (ret)
1800 IWL_WARN(mvm, "Failed sending remove station\n");
1801 return ret;
1802}
1803
Johannes Berg013290a2014-08-04 13:38:48 +02001804int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1805{
1806 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Liad Kaufmande24f632015-08-04 15:19:18 +03001807 u32 qmask = 0;
Johannes Berg013290a2014-08-04 13:38:48 +02001808
1809 lockdep_assert_held(&mvm->mutex);
1810
Liad Kaufmande24f632015-08-04 15:19:18 +03001811 if (!iwl_mvm_is_dqa_supported(mvm))
1812 qmask = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001813
Liad Kaufmande24f632015-08-04 15:19:18 +03001814 if (vif->type == NL80211_IFTYPE_AP) {
1815 /*
1816 * The firmware defines the TFD queue mask to only be relevant
1817 * for *unicast* queues, so the multicast (CAB) queue shouldn't
1818 * be included.
1819 */
Johannes Berg013290a2014-08-04 13:38:48 +02001820 qmask &= ~BIT(vif->cab_queue);
1821
Liad Kaufmande24f632015-08-04 15:19:18 +03001822 if (iwl_mvm_is_dqa_supported(mvm))
1823 qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
Liad Kaufman4c965132015-08-09 19:26:56 +03001824 } else if (iwl_mvm_is_dqa_supported(mvm) &&
1825 vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1826 qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
Liad Kaufmande24f632015-08-04 15:19:18 +03001827 }
1828
Johannes Berg013290a2014-08-04 13:38:48 +02001829 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
1830 ieee80211_vif_type_p2p(vif));
1831}
1832
Johannes Berg8ca151b2013-01-24 14:25:36 +01001833/* Allocate a new station entry for the broadcast station to the given vif,
1834 * and send it to the FW.
1835 * Note that each P2P mac should have its own broadcast station.
1836 *
1837 * @mvm: the mvm component
1838 * @vif: the interface to which the broadcast station is added
1839 * (the broadcast station itself is taken from the vif's bcast_sta) */
Johannes Berg013290a2014-08-04 13:38:48 +02001840int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001841{
1842 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001843 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001844 int ret;
1845
1846 lockdep_assert_held(&mvm->mutex);
1847
Johannes Berg013290a2014-08-04 13:38:48 +02001848 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001849 if (ret)
1850 return ret;
1851
Johannes Berg013290a2014-08-04 13:38:48 +02001852 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001853
1854 if (ret)
1855 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02001856
Johannes Berg8ca151b2013-01-24 14:25:36 +01001857 return ret;
1858}
1859
Johannes Berg013290a2014-08-04 13:38:48 +02001860void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1861{
1862 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1863
1864 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
1865}
1866
Johannes Berg8ca151b2013-01-24 14:25:36 +01001867/*
1868 * Send the FW a request to remove the station from its internal data
1869 * structures, and in addition remove it from the local data structure.
1870 */
Johannes Berg013290a2014-08-04 13:38:48 +02001871int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001872{
1873 int ret;
1874
1875 lockdep_assert_held(&mvm->mutex);
1876
Johannes Berg013290a2014-08-04 13:38:48 +02001877 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001878
Johannes Berg013290a2014-08-04 13:38:48 +02001879 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1880
Johannes Berg8ca151b2013-01-24 14:25:36 +01001881 return ret;
1882}
1883
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03001884#define IWL_MAX_RX_BA_SESSIONS 16
1885
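/*
 * On delBA, any RX queue may still hold frames for the released BAID,
 * so a synchronous (metadata.sync = 1) internal notification is sent to
 * all RX queues before the reorder buffers are torn down.
 */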
Sara Sharonb915c102016-03-23 16:32:02 +02001886static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02001887{
Sara Sharonb915c102016-03-23 16:32:02 +02001888 struct iwl_mvm_delba_notif notif = {
1889 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
1890 .metadata.sync = 1,
1891 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02001892 };
Sara Sharonb915c102016-03-23 16:32:02 +02001893 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
1894};
Sara Sharon10b2b202016-03-20 16:23:41 +02001895
Sara Sharonb915c102016-03-23 16:32:02 +02001896static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
1897 struct iwl_mvm_baid_data *data)
1898{
1899 int i;
1900
1901 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
1902
1903 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1904 int j;
1905 struct iwl_mvm_reorder_buffer *reorder_buf =
1906 &data->reorder_buf[i];
1907
Sara Sharon06904052016-02-28 20:28:17 +02001908 spin_lock_bh(&reorder_buf->lock);
1909 if (likely(!reorder_buf->num_stored)) {
1910 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02001911 continue;
Sara Sharon06904052016-02-28 20:28:17 +02001912 }
Sara Sharonb915c102016-03-23 16:32:02 +02001913
1914 /*
1915 * This shouldn't happen in regular DELBA since the internal
1916 * delBA notification should trigger a release of all frames in
1917 * the reorder buffer.
1918 */
1919 WARN_ON(1);
1920
1921 for (j = 0; j < reorder_buf->buf_size; j++)
1922 __skb_queue_purge(&reorder_buf->entries[j]);
Sara Sharon06904052016-02-28 20:28:17 +02001923 /*
1924		 * Prevent timer re-arm. This prevents a very far-fetched case
1925 * where we timed out on the notification. There may be prior
1926 * RX frames pending in the RX queue before the notification
1927 * that might get processed between now and the actual deletion
1928 * and we would re-arm the timer although we are deleting the
1929 * reorder buffer.
1930 */
1931 reorder_buf->removed = true;
1932 spin_unlock_bh(&reorder_buf->lock);
1933 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02001934 }
1935}
1936
1937static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
1938 u32 sta_id,
1939 struct iwl_mvm_baid_data *data,
1940 u16 ssn, u8 buf_size)
1941{
1942 int i;
1943
1944 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1945 struct iwl_mvm_reorder_buffer *reorder_buf =
1946 &data->reorder_buf[i];
1947 int j;
1948
1949 reorder_buf->num_stored = 0;
1950 reorder_buf->head_sn = ssn;
1951 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02001952 /* rx reorder timer */
1953 reorder_buf->reorder_timer.function =
1954 iwl_mvm_reorder_timer_expired;
1955 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
1956 init_timer(&reorder_buf->reorder_timer);
1957 spin_lock_init(&reorder_buf->lock);
1958 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02001959 reorder_buf->queue = i;
1960 reorder_buf->sta_id = sta_id;
1961 for (j = 0; j < reorder_buf->buf_size; j++)
1962 __skb_queue_head_init(&reorder_buf->entries[j]);
1963 }
Sara Sharon10b2b202016-03-20 16:23:41 +02001964}
1965
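/*
 * RX BA session setup/teardown. With the new RX API the per-queue
 * reorder data is allocated before sending ADD_STA, so a failure can be
 * unwound early; on success the firmware returns the BAID in the
 * command status. When a BA timeout is configured, the session timer is
 * armed at twice that timeout.
 */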
Johannes Berg8ca151b2013-01-24 14:25:36 +01001966int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Sara Sharon10b2b202016-03-20 16:23:41 +02001967 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001968{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001969 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001970 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02001971 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001972 int ret;
1973 u32 status;
1974
1975 lockdep_assert_held(&mvm->mutex);
1976
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03001977 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
1978 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
1979 return -ENOSPC;
1980 }
1981
Sara Sharon10b2b202016-03-20 16:23:41 +02001982 if (iwl_mvm_has_new_rx_api(mvm) && start) {
1983 /*
1984 * Allocate here so if allocation fails we can bail out early
1985 * before starting the BA session in the firmware
1986 */
Sara Sharonb915c102016-03-23 16:32:02 +02001987 baid_data = kzalloc(sizeof(*baid_data) +
1988 mvm->trans->num_rx_queues *
1989 sizeof(baid_data->reorder_buf[0]),
1990 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02001991 if (!baid_data)
1992 return -ENOMEM;
1993 }
1994
Johannes Berg8ca151b2013-01-24 14:25:36 +01001995 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
1996 cmd.sta_id = mvm_sta->sta_id;
1997 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03001998 if (start) {
1999 cmd.add_immediate_ba_tid = (u8) tid;
2000 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Sara Sharon854c5702016-01-26 13:17:47 +02002001 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002002 } else {
2003 cmd.remove_immediate_ba_tid = (u8) tid;
2004 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002005 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2006 STA_MODIFY_REMOVE_BA_TID;
2007
2008 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002009 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2010 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002011 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002012 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002013 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002014
Sara Sharon837c4da2016-01-07 16:50:45 +02002015 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002016 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002017 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2018 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002019 break;
2020 case ADD_STA_IMMEDIATE_BA_FAILURE:
2021 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2022 ret = -ENOSPC;
2023 break;
2024 default:
2025 ret = -EIO;
2026 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2027 start ? "start" : "stopp", status);
2028 break;
2029 }
2030
Sara Sharon10b2b202016-03-20 16:23:41 +02002031 if (ret)
2032 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002033
Sara Sharon10b2b202016-03-20 16:23:41 +02002034 if (start) {
2035 u8 baid;
2036
2037 mvm->rx_ba_sessions++;
2038
2039 if (!iwl_mvm_has_new_rx_api(mvm))
2040 return 0;
2041
2042 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2043 ret = -EINVAL;
2044 goto out_free;
2045 }
2046 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2047 IWL_ADD_STA_BAID_SHIFT);
2048 baid_data->baid = baid;
2049 baid_data->timeout = timeout;
2050 baid_data->last_rx = jiffies;
Wei Yongjun72c240f2016-07-12 11:40:57 +00002051 setup_timer(&baid_data->session_timer,
2052 iwl_mvm_rx_agg_session_expired,
2053 (unsigned long)&mvm->baid_map[baid]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002054 baid_data->mvm = mvm;
2055 baid_data->tid = tid;
2056 baid_data->sta_id = mvm_sta->sta_id;
2057
2058 mvm_sta->tid_to_baid[tid] = baid;
2059 if (timeout)
2060 mod_timer(&baid_data->session_timer,
2061 TU_TO_EXP_TIME(timeout * 2));
2062
Sara Sharonb915c102016-03-23 16:32:02 +02002063 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2064 baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002065 /*
2066 * protect the BA data with RCU to cover a case where our
2067		 * internal RX sync mechanism times out (not that it's
2068		 * supposed to happen) and we free the session data while
2069		 * RX is still being processed in parallel
2070 */
Sara Sharon35263a02016-06-21 12:12:10 +03002071 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2072 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002073 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2074 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002075 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002076 u8 baid = mvm_sta->tid_to_baid[tid];
2077
Sara Sharon60dec522016-06-21 14:14:08 +03002078 if (mvm->rx_ba_sessions > 0)
2079 /* check that restart flow didn't zero the counter */
2080 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002081 if (!iwl_mvm_has_new_rx_api(mvm))
2082 return 0;
2083
2084 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2085 return -EINVAL;
2086
2087 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2088 if (WARN_ON(!baid_data))
2089 return -EINVAL;
2090
2091 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002092 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002093 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002094 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2095 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002096 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002097 }
2098 return 0;
2099
2100out_free:
2101 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002102 return ret;
2103}
2104
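/*
 * Enable or disable TX aggregation for one TID by modifying the
 * station: STA_MODIFY_QUEUES updates tfd_queue_msk and
 * STA_MODIFY_TID_DISABLE_TX updates the per-TID disable bitmap. In DQA
 * mode the queue itself is kept when aggregation stops.
 */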
Liad Kaufman9794c642015-08-19 17:34:28 +03002105int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2106 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002107{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002108 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002109 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002110 int ret;
2111 u32 status;
2112
2113 lockdep_assert_held(&mvm->mutex);
2114
2115 if (start) {
2116 mvm_sta->tfd_queue_msk |= BIT(queue);
2117 mvm_sta->tid_disable_agg &= ~BIT(tid);
2118 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002119 /* In DQA-mode the queue isn't removed on agg termination */
2120 if (!iwl_mvm_is_dqa_supported(mvm))
2121 mvm_sta->tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002122 mvm_sta->tid_disable_agg |= BIT(tid);
2123 }
2124
2125 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2126 cmd.sta_id = mvm_sta->sta_id;
2127 cmd.add_modify = STA_MODE_MODIFY;
2128 cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
2129 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2130 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2131
2132 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002133 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2134 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002135 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002136 if (ret)
2137 return ret;
2138
Sara Sharon837c4da2016-01-07 16:50:45 +02002139 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002140 case ADD_STA_SUCCESS:
2141 break;
2142 default:
2143 ret = -EIO;
2144 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2145 start ? "start" : "stopp", status);
2146 break;
2147 }
2148
2149 return ret;
2150}
2151
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002152const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002153 IEEE80211_AC_BE,
2154 IEEE80211_AC_BK,
2155 IEEE80211_AC_BK,
2156 IEEE80211_AC_BE,
2157 IEEE80211_AC_VI,
2158 IEEE80211_AC_VI,
2159 IEEE80211_AC_VO,
2160 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002161 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002162};
2163
Johannes Berg3e56ead2013-02-15 22:23:18 +01002164static const u8 tid_to_ucode_ac[] = {
2165 AC_BE,
2166 AC_BK,
2167 AC_BK,
2168 AC_BE,
2169 AC_VI,
2170 AC_VI,
2171 AC_VO,
2172 AC_VO,
2173};
2174
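/*
 * AGG start state machine: the session moves from IWL_AGG_OFF to
 * IWL_AGG_STARTING once the queue is empty (ssn == next_reclaimed),
 * otherwise to IWL_EMPTYING_HW_QUEUE_ADDBA until the pending frames
 * are reclaimed; mac80211 is told to proceed with the ADDBA handshake
 * only in the former case.
 */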
Johannes Berg8ca151b2013-01-24 14:25:36 +01002175int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2176 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2177{
Johannes Berg5b577a92013-11-14 18:20:04 +01002178 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002179 struct iwl_mvm_tid_data *tid_data;
2180 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002181 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002182
2183 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2184 return -EINVAL;
2185
2186 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2187 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
2188 mvmsta->tid_data[tid].state);
2189 return -ENXIO;
2190 }
2191
2192 lockdep_assert_held(&mvm->mutex);
2193
Arik Nemtsovb2492502014-03-13 12:21:50 +02002194 spin_lock_bh(&mvmsta->lock);
2195
2196 /* possible race condition - we entered D0i3 while starting agg */
2197 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2198 spin_unlock_bh(&mvmsta->lock);
2199 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2200 return -EIO;
2201 }
2202
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002203 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002204
Liad Kaufmancf961e12015-08-13 19:16:08 +03002205 /*
2206 * Note the possible cases:
2207 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2208 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2209 * one and mark it as reserved
2210 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
2211 * non-DQA mode, since the TXQ hasn't yet been allocated
2212 */
2213 txq_id = mvmsta->tid_data[tid].txq_id;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002214 if (iwl_mvm_is_dqa_supported(mvm) &&
2215 unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
2216 ret = -ENXIO;
2217 IWL_DEBUG_TX_QUEUES(mvm,
2218 "Can't start tid %d agg on shared queue!\n",
2219 tid);
2220 goto release_locks;
2221 } else if (!iwl_mvm_is_dqa_supported(mvm) ||
Liad Kaufmancf961e12015-08-13 19:16:08 +03002222 mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002223 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2224 mvm->first_agg_queue,
Liad Kaufmancf961e12015-08-13 19:16:08 +03002225 mvm->last_agg_queue);
2226 if (txq_id < 0) {
2227 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002228 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2229 goto release_locks;
2230 }
2231
2232 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2233 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002234 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002235
2236 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002237
Liad Kaufmancf961e12015-08-13 19:16:08 +03002238 IWL_DEBUG_TX_QUEUES(mvm,
2239 "AGG for tid %d will be on queue #%d\n",
2240 tid, txq_id);
2241
Johannes Berg8ca151b2013-01-24 14:25:36 +01002242 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002243 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002244 tid_data->txq_id = txq_id;
2245 *ssn = tid_data->ssn;
2246
2247 IWL_DEBUG_TX_QUEUES(mvm,
2248 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2249 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2250 tid_data->next_reclaimed);
2251
2252 if (tid_data->ssn == tid_data->next_reclaimed) {
2253 tid_data->state = IWL_AGG_STARTING;
2254 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2255 } else {
2256 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2257 }
2258
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002259 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002260 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002261
2262release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002263 spin_unlock(&mvm->queue_info_lock);
2264out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002265 spin_unlock_bh(&mvmsta->lock);
2266
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002267 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002268}
2269
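/*
 * Second stage of AGG start, called once the peer has responded to the
 * ADDBA request. In DQA mode an already-enabled queue is only
 * reconfigured (after being drained) when the negotiated window became
 * smaller; otherwise the queue is enabled here with the aggregation
 * parameters.
 */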
2270int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002271 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2272 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002273{
Johannes Berg5b577a92013-11-14 18:20:04 +01002274 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002275 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002276 unsigned int wdg_timeout =
2277 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002278 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002279 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002280 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002281 u16 ssn;
2282
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002283 struct iwl_trans_txq_scd_cfg cfg = {
2284 .sta_id = mvmsta->sta_id,
2285 .tid = tid,
2286 .frame_limit = buf_size,
2287 .aggregate = true,
2288 };
2289
Eyal Shapiraefed6642014-09-14 15:58:53 +03002290 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2291 != IWL_MAX_TID_COUNT);
2292
Johannes Berg8ca151b2013-01-24 14:25:36 +01002293 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2294
2295 spin_lock_bh(&mvmsta->lock);
2296 ssn = tid_data->ssn;
2297 queue = tid_data->txq_id;
2298 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002299 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002300 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002301 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002302 spin_unlock_bh(&mvmsta->lock);
2303
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002304 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01002305
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002306 spin_lock_bh(&mvm->queue_info_lock);
2307 queue_status = mvm->queue_info[queue].status;
2308 spin_unlock_bh(&mvm->queue_info_lock);
2309
Liad Kaufmancf961e12015-08-13 19:16:08 +03002310 /* In DQA mode, the existing queue might need to be reconfigured */
2311 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002312 /* Maybe there is no need to even alloc a queue... */
2313 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2314 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002315
2316 /*
2317 * Only reconfig the SCD for the queue if the window size has
2318 * changed from current (become smaller)
2319 */
2320 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2321 /*
2322 * If reconfiguring an existing queue, it first must be
2323 * drained
2324 */
2325 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
2326 BIT(queue));
2327 if (ret) {
2328 IWL_ERR(mvm,
2329 "Error draining queue before reconfig\n");
2330 return ret;
2331 }
2332
2333 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2334 mvmsta->sta_id, tid,
2335 buf_size, ssn);
2336 if (ret) {
2337 IWL_ERR(mvm,
2338 "Error reconfiguring TXQ #%d\n", queue);
2339 return ret;
2340 }
2341 }
2342 }
2343
2344 if (alloc_queue)
2345 iwl_mvm_enable_txq(mvm, queue,
2346 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2347 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03002348
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002349 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2350 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2351 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2352 if (ret)
2353 return -EIO;
2354 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002355
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002356 /* No need to mark as reserved */
2357 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002358 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002359 spin_unlock_bh(&mvm->queue_info_lock);
2360
Johannes Berg8ca151b2013-01-24 14:25:36 +01002361 /*
2362 * Even though in theory the peer could have different
2363 * aggregation reorder buffer sizes for different sessions,
2364 * our ucode doesn't allow for that and has a global limit
2365 * for each station. Therefore, use the minimum of all the
2366 * aggregation sessions and our default value.
2367 */
2368 mvmsta->max_agg_bufsize =
2369 min(mvmsta->max_agg_bufsize, buf_size);
2370 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2371
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03002372 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2373 sta->addr, tid);
2374
Eyal Shapira9e680942013-11-09 00:16:16 +02002375 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002376}
2377
2378int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2379 struct ieee80211_sta *sta, u16 tid)
2380{
Johannes Berg5b577a92013-11-14 18:20:04 +01002381 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002382 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2383 u16 txq_id;
2384 int err;
2385
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02002386 /*
2387 * If mac80211 is cleaning its state, then say that we finished since
2388 * our state has been cleared anyway.
2389 */
2390 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2391 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2392 return 0;
2393 }
2394
Johannes Berg8ca151b2013-01-24 14:25:36 +01002395 spin_lock_bh(&mvmsta->lock);
2396
2397 txq_id = tid_data->txq_id;
2398
2399 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2400 mvmsta->sta_id, tid, txq_id, tid_data->state);
2401
Eyal Shapiraefed6642014-09-14 15:58:53 +03002402 mvmsta->agg_tids &= ~BIT(tid);
2403
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002404 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002405 /*
2406	 * The TXQ is marked as reserved only if no traffic came through yet.
2407	 * This means no traffic has been sent on this TID (agg'd or not), so
2408	 * we no longer have use for the queue. It hasn't even been
2409	 * allocated through iwl_mvm_enable_txq, so we can just mark it back
2410	 * as free.
2411 */
2412 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2413 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002414
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002415 spin_unlock_bh(&mvm->queue_info_lock);
2416
Johannes Berg8ca151b2013-01-24 14:25:36 +01002417 switch (tid_data->state) {
2418 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01002419 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002420
2421 IWL_DEBUG_TX_QUEUES(mvm,
2422 "ssn = %d, next_recl = %d\n",
2423 tid_data->ssn, tid_data->next_reclaimed);
2424
2425 /* There are still packets for this RA / TID in the HW */
2426 if (tid_data->ssn != tid_data->next_reclaimed) {
2427 tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
2428 err = 0;
2429 break;
2430 }
2431
2432 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002433 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02002434 spin_unlock_bh(&mvmsta->lock);
2435
2436 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2437
2438 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2439
Liad Kaufmancf961e12015-08-13 19:16:08 +03002440 if (!iwl_mvm_is_dqa_supported(mvm)) {
2441 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2442
2443 iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
2444 }
Johannes Bergf7f89e72014-08-05 15:24:44 +02002445 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002446 case IWL_AGG_STARTING:
2447 case IWL_EMPTYING_HW_QUEUE_ADDBA:
2448 /*
2449 * The agg session has been stopped before it was set up. This
2450 * can happen when the AddBA timer times out for example.
2451 */
2452
2453 /* No barriers since we are under mutex */
2454 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002455
2456 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2457 tid_data->state = IWL_AGG_OFF;
2458 err = 0;
2459 break;
2460 default:
2461 IWL_ERR(mvm,
2462 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2463 mvmsta->sta_id, tid, tid_data->state);
2464 IWL_ERR(mvm,
2465 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2466 err = -EINVAL;
2467 }
2468
2469 spin_unlock_bh(&mvmsta->lock);
2470
2471 return err;
2472}
2473
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002474int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2475 struct ieee80211_sta *sta, u16 tid)
2476{
Johannes Berg5b577a92013-11-14 18:20:04 +01002477 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002478 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2479 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02002480 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002481
2482 /*
2483 * First set the agg state to OFF to avoid calling
2484 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2485 */
2486 spin_lock_bh(&mvmsta->lock);
2487 txq_id = tid_data->txq_id;
2488 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2489 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02002490 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002491 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002492 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002493 spin_unlock_bh(&mvmsta->lock);
2494
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002495 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002496 /*
2497	 * The TXQ is marked as reserved only if no traffic came through yet.
2498	 * This means no traffic has been sent on this TID (agg'd or not), so
2499	 * we no longer have use for the queue. It hasn't even been
2500	 * allocated through iwl_mvm_enable_txq, so we can just mark it back
2501	 * as free.
2502 */
2503 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2504 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002505 spin_unlock_bh(&mvm->queue_info_lock);
2506
Johannes Bergb6658ff2013-07-24 13:55:51 +02002507 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002508 iwl_mvm_drain_sta(mvm, mvmsta, true);
Luca Coelho5888a402015-10-06 09:54:57 +03002509 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
Johannes Bergb6658ff2013-07-24 13:55:51 +02002510 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02002511 iwl_trans_wait_tx_queue_empty(mvm->trans,
2512 mvmsta->tfd_queue_msk);
2513 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002514
Johannes Bergf7f89e72014-08-05 15:24:44 +02002515 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2516
Liad Kaufmancf961e12015-08-13 19:16:08 +03002517 if (!iwl_mvm_is_dqa_supported(mvm)) {
2518 int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2519
2520 iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
2521 tid, 0);
2522 }
Johannes Bergb6658ff2013-07-24 13:55:51 +02002523 }
2524
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02002525 return 0;
2526}
2527
Johannes Berg8ca151b2013-01-24 14:25:36 +01002528static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2529{
Johannes Berg2dc2a152015-06-16 17:09:18 +02002530 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002531
2532 lockdep_assert_held(&mvm->mutex);
2533
Johannes Berg2dc2a152015-06-16 17:09:18 +02002534 /* Pick the unused key offset with the highest 'deleted'
2535 * counter. Every time a key is deleted, all the counters
2536 * are incremented and the one that was just deleted is
2537 * reset to zero. Thus, the highest counter is the one
2538 * that was deleted longest ago. Pick that one.
2539 */
2540 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2541 if (test_bit(i, mvm->fw_key_table))
2542 continue;
2543 if (mvm->fw_key_deleted[i] > max) {
2544 max = mvm->fw_key_deleted[i];
2545 max_offs = i;
2546 }
2547 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002548
Johannes Berg2dc2a152015-06-16 17:09:18 +02002549 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002550 return STA_KEY_IDX_INVALID;
2551
Johannes Berg2dc2a152015-06-16 17:09:18 +02002552 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002553}
2554
Johannes Berg5f7a1842015-12-11 09:36:10 +01002555static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2556 struct ieee80211_vif *vif,
2557 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002558{
Johannes Berg5b530e92014-12-23 16:00:17 +01002559 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002560
Johannes Berg5f7a1842015-12-11 09:36:10 +01002561 if (sta)
2562 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002563
2564 /*
2565 * The device expects GTKs for station interfaces to be
2566 * installed as GTKs for the AP station. If we have no
2567 * station ID, then use AP's station ID.
2568 */
2569 if (vif->type == NL80211_IFTYPE_STATION &&
Avri Altman9513c5e2015-10-19 16:29:11 +02002570 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2571 u8 sta_id = mvmvif->ap_sta_id;
2572
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002573 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2574 lockdep_is_held(&mvm->mutex));
2575
Avri Altman9513c5e2015-10-19 16:29:11 +02002576 /*
2577 * It is possible that the 'sta' parameter is NULL,
2578 * for example when a GTK is removed - the sta_id will then
2579 * be the AP ID, and no station was passed by mac80211.
2580 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002581 if (IS_ERR_OR_NULL(sta))
2582 return NULL;
2583
2584 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02002585 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002586
Johannes Berg5f7a1842015-12-11 09:36:10 +01002587 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002588}
2589
2590static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
2591 struct iwl_mvm_sta *mvm_sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01002592 struct ieee80211_key_conf *keyconf, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002593 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
2594 u8 key_offset)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002595{
Max Stepanov5a258aa2013-04-07 09:11:21 +03002596 struct iwl_mvm_add_sta_key_cmd cmd = {};
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002597 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01002598 int ret;
2599 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002600 u16 keyidx;
2601 int i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01002602 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002603
2604 keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2605 STA_KEY_FLG_KEYID_MSK;
2606 key_flags = cpu_to_le16(keyidx);
2607 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
2608
2609 switch (keyconf->cipher) {
2610 case WLAN_CIPHER_SUITE_TKIP:
2611 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Max Stepanov5a258aa2013-04-07 09:11:21 +03002612 cmd.tkip_rx_tsc_byte2 = tkip_iv32;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002613 for (i = 0; i < 5; i++)
Max Stepanov5a258aa2013-04-07 09:11:21 +03002614 cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
2615 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002616 break;
2617 case WLAN_CIPHER_SUITE_CCMP:
2618 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Max Stepanov5a258aa2013-04-07 09:11:21 +03002619 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002620 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01002621 case WLAN_CIPHER_SUITE_WEP104:
2622 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05002623 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01002624 case WLAN_CIPHER_SUITE_WEP40:
2625 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
2626 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
2627 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03002628 case WLAN_CIPHER_SUITE_GCMP_256:
2629 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
2630 /* fall through */
2631 case WLAN_CIPHER_SUITE_GCMP:
2632 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
2633 memcpy(cmd.key, keyconf->key, keyconf->keylen);
2634 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002635 default:
Max Stepanove36e5432013-08-27 19:56:13 +03002636 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
2637 memcpy(cmd.key, keyconf->key, keyconf->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002638 }
2639
Johannes Bergba3943b2014-11-12 23:54:48 +01002640 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002641 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2642
Luca Coelhod6ee54a2015-11-10 22:13:43 +02002643 cmd.key_offset = key_offset;
Max Stepanov5a258aa2013-04-07 09:11:21 +03002644 cmd.key_flags = key_flags;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002645 cmd.sta_id = sta_id;
2646
2647 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002648 if (cmd_flags & CMD_ASYNC)
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002649 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
2650 sizeof(cmd), &cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002651 else
2652 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
2653 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002654
2655 switch (status) {
2656 case ADD_STA_SUCCESS:
2657 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
2658 break;
2659 default:
2660 ret = -EIO;
2661 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
2662 break;
2663 }
2664
2665 return ret;
2666}
2667
2668static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
2669 struct ieee80211_key_conf *keyconf,
2670 u8 sta_id, bool remove_key)
2671{
2672 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
2673
2674 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03002675 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
2676 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
2677 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
2678 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
2679 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
2680 return -EINVAL;
2681
2682 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
2683 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002684 return -EINVAL;
2685
2686 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
2687 igtk_cmd.sta_id = cpu_to_le32(sta_id);
2688
2689 if (remove_key) {
2690 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
2691 } else {
2692 struct ieee80211_key_seq seq;
2693 const u8 *pn;
2694
Ayala Bekeraa950522016-06-01 00:28:09 +03002695 switch (keyconf->cipher) {
2696 case WLAN_CIPHER_SUITE_AES_CMAC:
2697 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
2698 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03002699 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2700 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2701 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
2702 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03002703 default:
2704 return -EINVAL;
2705 }
2706
Ayala Beker8e160ab2016-04-11 11:37:38 +03002707 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
2708 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
2709 igtk_cmd.ctrl_flags |=
2710 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002711 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
2712 pn = seq.aes_cmac.pn;
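		/*
		 * mac80211 hands the IGTK PN back most-significant byte
		 * first (pn[0] is the MSB); the firmware expects a
		 * little-endian 64-bit counter, hence the byte reversal
		 * below.
		 */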
2713 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
2714 ((u64) pn[4] << 8) |
2715 ((u64) pn[3] << 16) |
2716 ((u64) pn[2] << 24) |
2717 ((u64) pn[1] << 32) |
2718 ((u64) pn[0] << 40));
2719 }
2720
2721 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
2722 remove_key ? "removing" : "installing",
2723 igtk_cmd.sta_id);
2724
Ayala Beker8e160ab2016-04-11 11:37:38 +03002725 if (!iwl_mvm_has_new_rx_api(mvm)) {
2726 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
2727 .ctrl_flags = igtk_cmd.ctrl_flags,
2728 .key_id = igtk_cmd.key_id,
2729 .sta_id = igtk_cmd.sta_id,
2730 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
2731 };
2732
2733 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
2734 ARRAY_SIZE(igtk_cmd_v1.igtk));
2735 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
2736 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
2737 }
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03002738 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01002739 sizeof(igtk_cmd), &igtk_cmd);
2740}
2741
2742
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

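/*
 * Upload one key to the firmware, dispatching on the cipher: TKIP also
 * needs the phase-1 key material, derived here from the current RX
 * sequence counter (iv32); all other ciphers are sent as-is.
 */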
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

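/*
 * Key removal is just another ADD_STA_KEY command: the STA_KEY_NOT_VALID
 * flag tells the firmware to invalidate the key at the given offset.
 */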
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

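/*
 * Entry point for installing a key for a station.  IGTKs take the
 * dedicated MGMT_MCAST_KEY path; for all other keys a firmware key
 * offset is allocated here unless the caller pre-assigned one (HW
 * restart and D3 entry, see below), and WEP keys are uploaded twice so
 * that the unicast and multicast entries share a single key slot.
 */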
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

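/*
 * Mirror image of iwl_mvm_set_sta_key(): frees the offset in the
 * firmware key table, ages the fw_key_deleted counters (the freed
 * offset becomes the most recently deleted one), and removes WEP keys
 * twice, once per unicast/multicast entry.
 */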
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

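/*
 * Called by mac80211 when a new TKIP phase-1 receive key is derived
 * (iv32 changed); pushes the new phase-1 key to the firmware with
 * CMD_ASYNC, looking the station up under RCU.
 */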
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

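/*
 * Mark a station as awake: station_flags_msk names STA_FLG_PS while
 * station_flags stays zero, so the firmware clears the power-save flag
 * and resumes transmitting to the station.
 */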
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

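/*
 * Tell the firmware how many frames it may send to a sleeping station
 * during a service period (uAPSD) or after a PS-Poll.  For aggregation
 * queues the count is clamped to what is actually queued and more_data
 * is raised if more frames remain than the period allows; the Tx queue
 * pointers are blocked until the firmware acknowledges the new count.
 */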
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if all
	 * the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

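/*
 * Firmware notification that the end of a service period was reached
 * for a station; relay it to mac80211 via ieee80211_sta_eosp().
 */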
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

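/*
 * Set or clear STA_FLG_DISABLE_TX for a single station, making the
 * firmware stop (or resume) transmitting to it.
 */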
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

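/*
 * Per-station wrapper that also updates mac80211's view: the station
 * is blocked/unblocked for Tx queuing, except that unblocking is
 * skipped while frames are still pending for it in the driver.
 */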
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

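/*
 * Apply the Tx disable/enable to every station belonging to the given
 * interface, matched by firmware mac id and color.
 */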
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

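/*
 * Channel-switch (CSA) helper: disable Tx to this interface's AP
 * station while it is absent.
 */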
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}