/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
                                u32 sta_id,
                                struct ieee80211_key_conf *key, bool mcast,
                                u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
                                u8 key_offset, bool mfp);

/*
 * Newer versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
        if (iwl_mvm_has_new_rx_api(mvm) ||
            fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                return sizeof(struct iwl_mvm_add_sta_cmd);
        else
                return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
{
        int sta_id;
        u32 reserved_ids = 0;

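        /*
         * reserved_ids below is a u32 bitmap, so this scheme only works
         * while the firmware supports at most 32 station entries; the
         * BUILD_BUG_ON enforces that assumption at compile time.
         */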
        BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

        lockdep_assert_held(&mvm->mutex);

        /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
        if (iftype != NL80211_IFTYPE_STATION)
                reserved_ids = BIT(0);

        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
        for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;

                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
        return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
                .sta_id = mvm_sta->sta_id,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                                 STA_FLG_MIMO_EN_MSK |
                                                 STA_FLG_RTS_MIMO_PROT),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;

        if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                add_sta_cmd.station_type = mvm_sta->sta_type;

        if (!update || (flags & STA_MODIFY_QUEUES)) {
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

                if (!iwl_mvm_has_new_tx_api(mvm)) {
                        add_sta_cmd.tfd_queue_msk =
                                cpu_to_le32(mvm_sta->tfd_queue_msk);

                        if (flags & STA_MODIFY_QUEUES)
                                add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
                } else {
                        WARN_ON(flags & STA_MODIFY_QUEUES);
                }
        }

        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_80:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_40:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_20:
                if (sta->ht_cap.ht_supported)
                        add_sta_cmd.station_flags |=
                                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
                break;
        }

        switch (sta->rx_nss) {
        case 1:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case 2:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
                break;
        case 3 ... 8:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
                break;
        }

        switch (sta->smps_mode) {
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
                break;
        case IEEE80211_SMPS_STATIC:
                /* override NSS */
                add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case IEEE80211_SMPS_DYNAMIC:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
                break;
        case IEEE80211_SMPS_OFF:
                /* nothing */
                break;
        }

        if (sta->ht_cap.ht_supported) {
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

                mpdu_dens = sta->ht_cap.ampdu_density;
        }

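        /*
         * Pick the maximum A-MPDU length exponent the peer advertised:
         * the VHT capability takes precedence, and the HT ampdu_factor
         * is used only for HT-only peers.
         */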
        if (sta->vht_cap.vht_supported) {
                agg_size = sta->vht_cap.cap &
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                agg_size >>=
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
        } else if (sta->ht_cap.ht_supported) {
                agg_size = sta->ht_cap.ampdu_factor;
        }

        add_sta_cmd.station_flags |=
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
        if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
                add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

        if (sta->wme) {
                add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BK);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
                        add_sta_cmd.uapsd_acs |= BIT(AC_BE);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VI);
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                        add_sta_cmd.uapsd_acs |= BIT(AC_VO);
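                /*
                 * Mirror the AC bitmap into the upper nibble: the firmware
                 * field appears to carry trigger-enabled ACs in the low
                 * nibble and delivery-enabled ACs in the high one, and with
                 * U-APSD as negotiated here the two sets are identical.
                 */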
                add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
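                /*
                 * max_sp counts the service period length in pairs of
                 * frames; 0 means "deliver all buffered frames", encoded
                 * below as 128, which the firmware presumably treats as
                 * "no limit".
                 */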
                add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
        }

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "ADD_STA failed\n");
                break;
        }

        return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
        struct iwl_mvm_baid_data *data =
                from_timer(data, t, session_timer);
        struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        unsigned long timeout;

        rcu_read_lock();

        ba_data = rcu_dereference(*rcu_ptr);

        if (WARN_ON(!ba_data))
                goto unlock;

        if (!ba_data->timeout)
                goto unlock;

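        /*
         * The session is considered alive as long as a frame arrived
         * within the last two BA timeout intervals (in TUs); if one did,
         * simply rearm the timer relative to the last RX instead of
         * tearing the session down.
         */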
        timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&ba_data->session_timer, timeout);
                goto unlock;
        }

        /* Timer expired */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

        /*
         * sta should be valid unless the following happens:
         * The firmware asserts, which triggers a reconfig flow, but
         * the reconfig fails before we set the pointer to sta into
         * the fw_id_to_mac_id pointer table. mac80211 can't stop
         * A-MPDU and hence the timer continues to run. Then, the
         * timer expires and sta is NULL.
         */
        if (!sta)
                goto unlock;

        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_rx_ba_timer_expired(mvm_sta->vif,
                                      sta->addr, ba_data->tid);
unlock:
        rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u32 status;
        u8 sta_id;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return -EINVAL;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        mvmsta->tid_disable_agg |= disable_agg_tids;

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_QUEUES;
        if (disable_agg_tids)
                cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        if (remove_queue)
                cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        rcu_read_unlock();

        /* Notify FW of queue removal from the STA queues */
        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);

        return ret;
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
                               int mac80211_queue, u8 tid, u8 flags)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
        int ret;

        if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
                return -EINVAL;

        if (iwl_mvm_has_new_tx_api(mvm)) {
                spin_lock_bh(&mvm->queue_info_lock);

                if (remove_mac_queue)
                        mvm->hw_queue_to_mac80211[queue] &=
                                ~BIT(mac80211_queue);

                spin_unlock_bh(&mvm->queue_info_lock);

                iwl_trans_txq_free(mvm->trans, queue);

                return 0;
        }

        spin_lock_bh(&mvm->queue_info_lock);

        if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
                spin_unlock_bh(&mvm->queue_info_lock);
                return 0;
        }

        mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

        /*
         * If there is another TID with the same AC - don't remove the MAC queue
         * from the mapping
         */
        if (tid < IWL_MAX_TID_COUNT) {
                unsigned long tid_bitmap =
                        mvm->queue_info[queue].tid_bitmap;
                int ac = tid_to_mac80211_ac[tid];
                int i;

                for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
                        if (tid_to_mac80211_ac[i] == ac)
                                remove_mac_queue = false;
                }
        }

        if (remove_mac_queue)
                mvm->hw_queue_to_mac80211[queue] &=
                        ~BIT(mac80211_queue);
        mvm->queue_info[queue].hw_queue_refcount--;

        cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
                SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
        if (cmd.action == SCD_CFG_DISABLE_QUEUE)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
                            queue,
                            mvm->queue_info[queue].hw_queue_refcount,
                            mvm->hw_queue_to_mac80211[queue]);

        /* If the queue is still enabled - nothing left to do in this func */
        if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
                spin_unlock_bh(&mvm->queue_info_lock);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tid = mvm->queue_info[queue].txq_tid;

        /* Make sure queue info is correct even though we overwrite it */
        WARN(mvm->queue_info[queue].hw_queue_refcount ||
             mvm->queue_info[queue].tid_bitmap ||
             mvm->hw_queue_to_mac80211[queue],
             "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
             queue, mvm->queue_info[queue].hw_queue_refcount,
             mvm->hw_queue_to_mac80211[queue],
             mvm->queue_info[queue].tid_bitmap);

        /* If we are here - the queue is freed and we can zero out these vals */
        mvm->queue_info[queue].hw_queue_refcount = 0;
        mvm->queue_info[queue].tid_bitmap = 0;
        mvm->hw_queue_to_mac80211[queue] = 0;

        /* Regardless if this is a reserved TXQ for a STA - mark it as false */
        mvm->queue_info[queue].reserved = false;

        spin_unlock_bh(&mvm->queue_info_lock);

        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
                                   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

        if (ret)
                IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
                        queue, ret);
        return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return -EINVAL;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        agg_tids |= BIT(tid);
        }
        spin_unlock_bh(&mvmsta->lock);

        return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long disable_agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return 0;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        /* Unmap MAC queues and TIDs from this queue */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
        }

        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
        spin_unlock_bh(&mvmsta->lock);

        rcu_read_unlock();

        return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                                       bool same_sta)
{
        struct iwl_mvm_sta *mvmsta;
        u8 txq_curr_ac, sta_id, tid;
        unsigned long disable_agg_tids = 0;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        spin_lock_bh(&mvm->queue_info_lock);
        txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid = mvm->queue_info[queue].txq_tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
        if (WARN_ON(!mvmsta))
                return -EINVAL;

        disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
        /* Disable the queue */
        if (disable_agg_tids)
                iwl_mvm_invalidate_sta_queue(mvm, queue,
                                             disable_agg_tids, false);

        ret = iwl_mvm_disable_txq(mvm, queue,
                                  mvmsta->vif->hw_queue[txq_curr_ac],
                                  tid, 0);
        if (ret) {
                /* Re-mark the inactive queue as inactive */
                spin_lock_bh(&mvm->queue_info_lock);
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm,
                        "Failed to free inactive queue %d (ret=%d)\n",
                        queue, ret);

                return ret;
        }

        /* If TXQ is allocated to another STA, update removal in FW */
        if (!same_sta)
                iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

        return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
        int queue = 0;
        u8 ac_to_queue[IEEE80211_NUM_ACS];
        int i;

        lockdep_assert_held(&mvm->queue_info_lock);
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

        /* See what ACs the existing queues for this STA have */
        for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
                /* Only DATA queues can be shared */
                if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;

                /* Don't try and take queues being reconfigured */
                if (mvm->queue_info[i].status ==
                    IWL_MVM_QUEUE_RECONFIGURING)
                        continue;

                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }

        /*
         * The queue to share is chosen only from DATA queues as follows (in
         * descending priority):
         * 1. An AC_BE queue
         * 2. Same AC queue
         * 3. Highest AC queue that is lower than new AC
         * 4. Any existing AC (there always is at least 1 DATA queue)
         */

        /* Priority 1: An AC_BE queue */
        if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BE];
        /* Priority 2: Same AC queue */
        else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[ac];
        /* Priority 3a: If new AC is VO and VI exists - use VI */
        else if (ac == IEEE80211_AC_VO &&
                 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 3b: No BE so only AC less than the new one is BK */
        else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BK];
        /* Priority 4a: No BE nor BK - use VI if exists */
        else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 4b: No BE, BK nor VI - use VO if exists */
        else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VO];

        /* Make sure queue found (or not) is legal */
        if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
            !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
            (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
                return -ENOSPC;
        }

        /* Make sure the queue isn't in the middle of being reconfigured */
        if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
                IWL_ERR(mvm,
                        "TXQ %d is in the middle of re-config - try again\n",
                        queue);
                return -EBUSY;
        }

        return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
                               int ac, int ssn, unsigned int wdg_timeout,
                               bool force)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        unsigned long mq;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        /*
         * If the AC is lower than current one - FIFO needs to be redirected to
         * the lowest one of the streams in the queue. Check if this is needed
         * here.
         * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
         * value 3 and VO with value 0, so to check if ac X is lower than ac Y
         * we need to check if the numerical value of X is LARGER than of Y.
         */
        spin_lock_bh(&mvm->queue_info_lock);
        if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
                spin_unlock_bh(&mvm->queue_info_lock);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "No redirection needed on TXQ #%d\n",
                                    queue);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        mq = mvm->hw_queue_to_mac80211[queue];
        shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
        spin_unlock_bh(&mvm->queue_info_lock);

        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);

        /* Stop MAC queues and wait for this queue to empty */
        iwl_mvm_stop_mac_queues(mvm, mq);
        ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
        if (ret) {
                IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                        queue);
                ret = -EIO;
                goto out;
        }

        /* Before redirecting the queue we need to de-activate it */
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                        ret);

        /* Make sure the SCD wrptr is correctly set before reconfiguring */
        iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

        /* Update the TID "owner" of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

        /* Redirect to lower AC */
        iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                             cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

        /* Update AC marking of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].mac80211_ac = ac;
        spin_unlock_bh(&mvm->queue_info_lock);

        /*
         * Mark queue as shared in transport if shared
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there to shared
         * queues
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
        /* Continue using the MAC queues */
        iwl_mvm_start_mac_queues(mvm, mq);

        return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
                                   u8 minq, u8 maxq)
{
        int i;

        lockdep_assert_held(&mvm->queue_info_lock);

        /* This should not be hit with new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -ENOSPC;

        /* Start by looking for a free queue */
        for (i = minq; i <= maxq; i++)
                if (mvm->queue_info[i].hw_queue_refcount == 0 &&
                    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
                        return i;

        return -ENOSPC;
}

static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
                                   u8 sta_id, u8 tid, unsigned int timeout)
{
        int queue, size = IWL_DEFAULT_QUEUE_SIZE;

        if (tid == IWL_MAX_TID_COUNT) {
                tid = IWL_MGMT_TID;
                size = IWL_MGMT_QUEUE_SIZE;
        }
        queue = iwl_trans_txq_alloc(mvm->trans,
                                    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
                                    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

        if (queue < 0) {
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
                                    sta_id, tid, queue);
                return queue;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
                            queue, sta_id, tid);

        mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
        IWL_DEBUG_TX_QUEUES(mvm,
                            "Enabling TXQ #%d (mac80211 map:0x%x)\n",
                            queue, mvm->hw_queue_to_mac80211[queue]);

        return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                        struct ieee80211_sta *sta, u8 ac,
                                        int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating queue for sta %d on tid %d\n",
                            mvmsta->sta_id, tid);
        queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
                                        wdg_timeout);
        if (queue < 0)
                return queue;

        IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        spin_unlock_bh(&mvmsta->lock);

        return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
                                       int mac80211_queue, u8 sta_id, u8 tid)
{
        bool enable_queue = true;

        spin_lock_bh(&mvm->queue_info_lock);

        /* Make sure this TID isn't already enabled */
        if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
                        queue, tid);
                return false;
        }

        /* Update mappings and refcounts */
        if (mvm->queue_info[queue].hw_queue_refcount > 0)
                enable_queue = false;

        if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
                WARN(mac80211_queue >=
                     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
                     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
                     mac80211_queue, queue, sta_id, tid);
                mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
        }

        mvm->queue_info[queue].hw_queue_refcount++;
        mvm->queue_info[queue].tid_bitmap |= BIT(tid);
        mvm->queue_info[queue].ra_sta_id = sta_id;

        if (enable_queue) {
                if (tid != IWL_MAX_TID_COUNT)
                        mvm->queue_info[queue].mac80211_ac =
                                tid_to_mac80211_ac[tid];
                else
                        mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

                mvm->queue_info[queue].txq_tid = tid;
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
                            queue, mvm->queue_info[queue].hw_queue_refcount,
                            mvm->hw_queue_to_mac80211[queue]);

        spin_unlock_bh(&mvm->queue_info_lock);

        return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
                               int mac80211_queue, u16 ssn,
                               const struct iwl_trans_txq_scd_cfg *cfg,
                               unsigned int wdg_timeout)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_ENABLE_QUEUE,
                .window = cfg->frame_limit,
                .sta_id = cfg->sta_id,
                .ssn = cpu_to_le16(ssn),
                .tx_fifo = cfg->fifo,
                .aggregate = cfg->aggregate,
                .tid = cfg->tid,
        };
        bool inc_ssn;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return false;

        /* Send the enabling command if we need to */
        if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
                                        cfg->sta_id, cfg->tid))
                return false;

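        /*
         * The transport may have to start the queue one frame later than
         * requested (inc_ssn); if so, bump the SSN in the SCD config
         * command as well, so driver, transport and firmware all agree on
         * the first sequence number.
         */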
        inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
                                           NULL, wdg_timeout);
        if (inc_ssn)
                le16_add_cpu(&cmd.ssn, 1);

        WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
             "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

        return inc_ssn;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
                .sta_id = mvmsta->sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
        };
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;
        bool using_inactive_queue = false, same_sta = false;
        unsigned long disable_agg_tids = 0;
        enum iwl_mvm_agg_state queue_state;
        bool shared_queue = false, inc_ssn;
        int ssn;
        unsigned long tfd_queue_mask;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm))
                return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

        spin_lock_bh(&mvmsta->lock);
        tfd_queue_mask = mvmsta->tfd_queue_msk;
        spin_unlock_bh(&mvmsta->lock);

        spin_lock_bh(&mvm->queue_info_lock);

        /*
         * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
         * exists
         */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control)) {
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                                            queue);

                /* If no such queue is found, we'll use a DATA queue instead */
        }

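        /*
         * Prefer the queue that was reserved for this station when it was
         * added, as long as the reservation still stands or the queue is
         * merely inactive and can be reclaimed below.
         */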
        if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
            (mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_RESERVED ||
             mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_INACTIVE)) {
                queue = mvmsta->reserved_queue;
                mvm->queue_info[queue].reserved = true;
                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
        }

        if (queue < 0)
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);

        /*
         * Check if this queue is already allocated but inactive.
         * In such a case, we'll need to first free this queue before enabling
         * it again, so we'll mark it as reserved to make sure no new traffic
         * arrives on it
         */
        if (queue > 0 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
                using_inactive_queue = true;
                same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
                                    queue, mvmsta->sta_id, tid);
        }

        /* No free queue - we'll have to share */
        if (queue <= 0) {
                queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
                if (queue > 0) {
                        shared_queue = true;
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
                }
        }

        /*
         * Mark TXQ as ready, even though it hasn't been fully configured yet,
         * to make sure no one else takes it.
         * This will allow avoiding re-acquiring the lock at the end of the
         * configuration. On error we'll mark it back as free.
         */
        if ((queue > 0) && !shared_queue)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

        spin_unlock_bh(&mvm->queue_info_lock);

        /* This shouldn't happen - out of queues */
        if (WARN_ON(queue <= 0)) {
                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                        tid, cfg.sta_id);
                return queue;
        }

        /*
         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
         * but for configuring the SCD to send A-MPDUs we need to mark the queue
         * as aggregatable.
         * Mark all DATA queues as allowing to be aggregated at some point
         */
        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

        /*
         * If this queue was previously inactive (idle) - we need to free it
         * first
         */
        if (using_inactive_queue) {
                ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
                if (ret)
                        return ret;
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating %squeue #%d to sta %d on tid %d\n",
                            shared_queue ? "shared " : "", queue,
                            mvmsta->sta_id, tid);

        if (shared_queue) {
                /* Disable any open aggs on this queue */
                disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

                if (disable_agg_tids) {
                        IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
                                            queue);
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                }
        }

        ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
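        /*
         * If the transport had to start the queue one SSN later, keep this
         * frame's sequence number (and, under mvmsta->lock below, the
         * driver's TX counter for the TID) in step with it.
         */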
        inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
                                     ssn, &cfg, wdg_timeout);
        if (inc_ssn) {
                ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
                le16_add_cpu(&hdr->seq_ctrl, 0x10);
        }

        /*
         * Mark queue as shared in transport if shared
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication there to shared
         * queues
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

        spin_lock_bh(&mvmsta->lock);
        /*
         * This looks racy, but it is not. We have only one packet for
         * this ra/tid in our Tx path since we stop the Qdisc when we
         * need to allocate a new TFD queue.
         */
        if (inc_ssn)
                mvmsta->tid_data[tid].seq_number += 0x10;
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        mvmsta->tfd_queue_msk |= BIT(queue);
        queue_state = mvmsta->tid_data[tid].state;

        if (mvmsta->reserved_queue == queue)
                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
        spin_unlock_bh(&mvmsta->lock);

        if (!shared_queue) {
                ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
                if (ret)
                        goto out_err;

                /* If we need to re-enable aggregations... */
                if (queue_state == IWL_AGG_ON) {
                        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                        if (ret)
                                goto out_err;
                }
        } else {
                /* Redirect queue, if needed */
                ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
                                                 wdg_timeout, false);
                if (ret)
                        goto out_err;
        }

        return 0;

out_err:
        iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

        return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
        int tid;
        unsigned long tid_bitmap;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        spin_lock_bh(&mvm->queue_info_lock);
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                return;

        /* Find any TID for queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        cmd.tid = tid;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                        queue, ret);
                return;
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);
        IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                            queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u8 sta_id;
        int tid = -1;
        unsigned long tid_bitmap;
        unsigned int wdg_timeout;
        int ssn;
        int ret = true;

        /* queue sharing is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* Find TID for queue, and make sure it is the only one on the queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        if (tid_bitmap != BIT(tid)) {
                IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                        queue, tid_bitmap);
                return;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                            tid);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

        ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
                                         tid_to_mac80211_ac[tid], ssn,
                                         wdg_timeout, true);
        if (ret) {
                IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
                return;
        }

        /* If aggs should be turned back on - do it */
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
                struct iwl_mvm_add_sta_cmd cmd = {0};

                mvmsta->tid_disable_agg &= ~BIT(tid);

                cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
                cmd.sta_id = mvmsta->sta_id;
                cmd.add_modify = STA_MODE_MODIFY;
                cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
                cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
                cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                           iwl_mvm_add_sta_cmd_size(mvm), &cmd);
                if (!ret) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "TXQ #%d is now aggregated again\n",
                                            queue);

                        /* Mark queue internally as aggregating again */
                        iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
                }
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
        spin_unlock_bh(&mvm->queue_info_lock);
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 */
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
                                         struct iwl_mvm_sta *mvmsta, int queue,
                                         unsigned long tid_bitmap)
{
        int tid;

        lockdep_assert_held(&mvmsta->lock);
        lockdep_assert_held(&mvm->queue_info_lock);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                /* If some TFDs are still queued - don't mark TID as inactive */
                if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
                        tid_bitmap &= ~BIT(tid);

                /* Don't mark as inactive any TID that has an active BA */
                if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
                        tid_bitmap &= ~BIT(tid);
        }

        /* If all TIDs in the queue are inactive - mark queue as inactive. */
        if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;

                for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
                        mvmsta->tid_data[tid].is_tid_active = false;

                IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
                                    queue);
                return;
        }

        /*
         * If we are here, this is a shared queue and not all TIDs timed-out.
         * Remove the ones that did.
         */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];

                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
                mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
                mvm->queue_info[queue].hw_queue_refcount--;
                mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
                mvmsta->tid_data[tid].is_tid_active = false;

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Removing inactive TID %d from shared Q:%d\n",
                                    tid, queue);
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "TXQ #%d left with tid bitmap 0x%x\n", queue,
                            mvm->queue_info[queue].tid_bitmap);

        /*
         * There may be different TIDs with the same mac queues, so make
         * sure all TIDs have existing corresponding mac queues enabled
         */
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                mvm->hw_queue_to_mac80211[queue] |=
                        BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
        }

        /* If the queue is marked as shared - "unshare" it */
        if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
                IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
                                    queue);
        }
}

static void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
{
        unsigned long timeout_queues_map = 0;
        unsigned long now = jiffies;
        int i;

        if (iwl_mvm_has_new_tx_api(mvm))
                return;

        spin_lock_bh(&mvm->queue_info_lock);
        for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
                if (mvm->queue_info[i].hw_queue_refcount > 0)
                        timeout_queues_map |= BIT(i);
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        /*
         * If a queue times out - mark it as INACTIVE (don't remove right away
         * if we don't have to.) This is an optimization in case traffic comes
         * later, and we don't HAVE to use a currently-inactive queue
         */
        for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
                struct ieee80211_sta *sta;
                struct iwl_mvm_sta *mvmsta;
                u8 sta_id;
                int tid;
                unsigned long inactive_tid_bitmap = 0;
                unsigned long queue_tid_bitmap;

                spin_lock_bh(&mvm->queue_info_lock);
                queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;

                /* If TXQ isn't in active use anyway - nothing to do here... */
                if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
                    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
                        spin_unlock_bh(&mvm->queue_info_lock);
                        continue;
                }

                /* Check to see if there are inactive TIDs on this queue */
                for_each_set_bit(tid, &queue_tid_bitmap,
                                 IWL_MAX_TID_COUNT + 1) {
                        if (time_after(mvm->queue_info[i].last_frame_time[tid] +
                                       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
                                continue;

                        inactive_tid_bitmap |= BIT(tid);
                }
                spin_unlock_bh(&mvm->queue_info_lock);

                /* If all TIDs are active - finish check on this queue */
                if (!inactive_tid_bitmap)
                        continue;

                /*
                 * If we are here - the queue hadn't been served recently and is
                 * in use
                 */

                sta_id = mvm->queue_info[i].ra_sta_id;
                sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

                /*
                 * If the STA doesn't exist anymore, it isn't an error. It could
                 * be that it was removed since getting the queues, and in this
                 * case it should've inactivated its queues anyway.
                 */
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);

                spin_lock_bh(&mvmsta->lock);
                spin_lock(&mvm->queue_info_lock);
                iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
                                             inactive_tid_bitmap);
                spin_unlock(&mvm->queue_info_lock);
                spin_unlock_bh(&mvmsta->lock);
        }

        rcu_read_unlock();
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
        if (tid == IWL_MAX_TID_COUNT)
                return IEEE80211_AC_VO; /* MGMT */

        return tid_to_mac80211_ac[tid];
}

1452static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1453 struct ieee80211_sta *sta, int tid)
1454{
1455 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1456 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1457 struct sk_buff *skb;
1458 struct ieee80211_hdr *hdr;
1459 struct sk_buff_head deferred_tx;
1460 u8 mac_queue;
1461 bool no_queue = false; /* Marks if there is a problem with the queue */
1462 u8 ac;
1463
1464 lockdep_assert_held(&mvm->mutex);
1465
1466 skb = skb_peek(&tid_data->deferred_tx_frames);
1467 if (!skb)
1468 return;
1469 hdr = (void *)skb->data;
1470
1471 ac = iwl_mvm_tid_to_ac_queue(tid);
1472 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1473
Sara Sharon6862fce2017-02-22 19:34:17 +02001474 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
Liad Kaufman24afba72015-07-28 18:56:08 +03001475 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1476 IWL_ERR(mvm,
1477 "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1478 mvmsta->sta_id, tid);
1479
1480 /*
1481		 * Mark the queue as problematic so the deferred traffic is
1482		 * freed later, as we can do nothing with it
1483 */
1484 no_queue = true;
1485 }
1486
1487 __skb_queue_head_init(&deferred_tx);
1488
Liad Kaufmand2515a92016-03-23 16:31:08 +02001489 /* Disable bottom-halves when entering TX path */
1490 local_bh_disable();
Liad Kaufman24afba72015-07-28 18:56:08 +03001491 spin_lock(&mvmsta->lock);
1492 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
Liad Kaufmanad5de732016-09-27 16:01:10 +03001493 mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
Liad Kaufman24afba72015-07-28 18:56:08 +03001494 spin_unlock(&mvmsta->lock);
1495
Liad Kaufman24afba72015-07-28 18:56:08 +03001496 while ((skb = __skb_dequeue(&deferred_tx)))
1497 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1498 ieee80211_free_txskb(mvm->hw, skb);
1499 local_bh_enable();
1500
1501 /* Wake queue */
1502 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1503}
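
/*
 * Example (illustrative sketch, not called by the driver): the function
 * above uses the common "splice under lock, drain outside the lock"
 * pattern, holding the per-STA spinlock only while moving the queue and
 * never while transmitting. Reduced to its skeleton, with example_tx()
 * standing in for the real TX handler, the pattern is:
 */
static void __maybe_unused
iwl_mvm_example_splice_and_drain(struct sk_buff_head *pending,
				 spinlock_t *lock,
				 void (*example_tx)(struct sk_buff *skb))
{
	struct sk_buff_head local;
	struct sk_buff *skb;

	__skb_queue_head_init(&local);

	/* steal all pending frames while holding the lock... */
	spin_lock(lock);
	skb_queue_splice_init(pending, &local);
	spin_unlock(lock);

	/* ...then process them with the lock released */
	while ((skb = __skb_dequeue(&local)))
		example_tx(skb);
}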
1504
1505void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1506{
1507 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1508 add_stream_wk);
1509 struct ieee80211_sta *sta;
1510 struct iwl_mvm_sta *mvmsta;
1511 unsigned long deferred_tid_traffic;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001512 int queue, sta_id, tid;
Liad Kaufman24afba72015-07-28 18:56:08 +03001513
Liad Kaufman9794c642015-08-19 17:34:28 +03001514 /* Check inactivity of queues */
1515 iwl_mvm_inactivity_check(mvm);
1516
Liad Kaufman24afba72015-07-28 18:56:08 +03001517 mutex_lock(&mvm->mutex);
1518
Sara Sharon34e10862017-02-23 13:15:07 +02001519 /* No queue reconfiguration in TVQM mode */
1520 if (iwl_mvm_has_new_tx_api(mvm))
1521 goto alloc_queues;
1522
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001523	/* Reconfigure queues requiring reconfiguration */
Sara Sharon34e10862017-02-23 13:15:07 +02001524 for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001525 bool reconfig;
Liad Kaufman19aefa42016-03-08 14:29:51 +02001526 bool change_owner;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001527
1528 spin_lock_bh(&mvm->queue_info_lock);
1529 reconfig = (mvm->queue_info[queue].status ==
1530 IWL_MVM_QUEUE_RECONFIGURING);
Liad Kaufman19aefa42016-03-08 14:29:51 +02001531
1532 /*
1533 * We need to take into account a situation in which a TXQ was
1534 * allocated to TID x, and then turned shared by adding TIDs y
1535 * and z. If TID x becomes inactive and is removed from the TXQ,
1536 * ownership must be given to one of the remaining TIDs.
1537	 * This is mainly because if TID x later resumes traffic, a new
1538	 * queue can't be allocated for it as long as it still owns another TXQ.
1539 */
1540 change_owner = !(mvm->queue_info[queue].tid_bitmap &
1541 BIT(mvm->queue_info[queue].txq_tid)) &&
1542 (mvm->queue_info[queue].status ==
1543 IWL_MVM_QUEUE_SHARED);
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001544 spin_unlock_bh(&mvm->queue_info_lock);
1545
1546 if (reconfig)
1547 iwl_mvm_unshare_queue(mvm, queue);
Liad Kaufman19aefa42016-03-08 14:29:51 +02001548 else if (change_owner)
1549 iwl_mvm_change_queue_owner(mvm, queue);
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001550 }
1551
Sara Sharon34e10862017-02-23 13:15:07 +02001552alloc_queues:
Liad Kaufman24afba72015-07-28 18:56:08 +03001553 /* Go over all stations with deferred traffic */
1554 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1555 IWL_MVM_STATION_COUNT) {
1556 clear_bit(sta_id, mvm->sta_deferred_frames);
1557 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1558 lockdep_is_held(&mvm->mutex));
1559 if (IS_ERR_OR_NULL(sta))
1560 continue;
1561
1562 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1563 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1564
1565 for_each_set_bit(tid, &deferred_tid_traffic,
1566 IWL_MAX_TID_COUNT + 1)
1567 iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1568 }
1569
1570 mutex_unlock(&mvm->mutex);
1571}
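
/*
 * Example (simplified sketch of the producer side): before the worker
 * above runs, the TX path defers a frame roughly as follows - it parks
 * the skb, records the TID and station as having deferred traffic, and
 * schedules add_stream_wk. The function name is made up; the fields and
 * helpers are the ones used elsewhere in this file. The caller is
 * assumed to hold mvmsta->lock.
 */
static void __maybe_unused
iwl_mvm_example_defer_frame(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
			    int tid, struct sk_buff *skb)
{
	/* park the frame until a queue exists for this RA/TID pair */
	__skb_queue_tail(&mvmsta->tid_data[tid].deferred_tx_frames, skb);
	mvmsta->deferred_traffic_tid_map |= BIT(tid);

	/* flag the station for the worker and kick it */
	set_bit(mvmsta->sta_id, mvm->sta_deferred_frames);
	schedule_work(&mvm->add_stream_wk);
}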
1572
1573static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001574 struct ieee80211_sta *sta,
1575 enum nl80211_iftype vif_type)
Liad Kaufman24afba72015-07-28 18:56:08 +03001576{
1577 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1578 int queue;
Sara Sharon01796ff2016-11-16 17:04:36 +02001579 bool using_inactive_queue = false, same_sta = false;
Liad Kaufman24afba72015-07-28 18:56:08 +03001580
Sara Sharon396952e2017-02-22 19:40:55 +02001581 /* queue reserving is disabled on new TX path */
1582 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1583 return 0;
1584
Liad Kaufman9794c642015-08-19 17:34:28 +03001585 /*
1586 * Check for inactive queues, so we don't reach a situation where we
1587 * can't add a STA due to a shortage in queues that doesn't really exist
1588 */
1589 iwl_mvm_inactivity_check(mvm);
1590
Liad Kaufman24afba72015-07-28 18:56:08 +03001591 spin_lock_bh(&mvm->queue_info_lock);
1592
1593 /* Make sure we have free resources for this STA */
Liad Kaufmand5216a22015-08-09 15:50:51 +03001594 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1595 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
Liad Kaufmancf961e12015-08-13 19:16:08 +03001596 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1597 IWL_MVM_QUEUE_FREE))
Liad Kaufmand5216a22015-08-09 15:50:51 +03001598 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1599 else
Liad Kaufman9794c642015-08-19 17:34:28 +03001600 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1601 IWL_MVM_DQA_MIN_DATA_QUEUE,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001602 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufman24afba72015-07-28 18:56:08 +03001603 if (queue < 0) {
1604 spin_unlock_bh(&mvm->queue_info_lock);
1605 IWL_ERR(mvm, "No available queues for new station\n");
1606 return -ENOSPC;
Sara Sharon01796ff2016-11-16 17:04:36 +02001607 } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
1608 /*
1609		 * If this queue is already allocated but inactive we'll need to
1610		 * first free this queue before enabling it again, so we'll mark
1611		 * it as reserved to make sure no new traffic arrives on it
1612 */
1613 using_inactive_queue = true;
1614 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
Liad Kaufman24afba72015-07-28 18:56:08 +03001615 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03001616 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman24afba72015-07-28 18:56:08 +03001617
1618 spin_unlock_bh(&mvm->queue_info_lock);
1619
1620 mvmsta->reserved_queue = queue;
1621
Sara Sharon01796ff2016-11-16 17:04:36 +02001622 if (using_inactive_queue)
1623 iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
1624
Liad Kaufman24afba72015-07-28 18:56:08 +03001625 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1626 queue, mvmsta->sta_id);
1627
1628 return 0;
1629}
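
/*
 * For reference, the queue status lifecycle used above: a DQA queue
 * moves IWL_MVM_QUEUE_FREE -> IWL_MVM_QUEUE_RESERVED (this function) ->
 * IWL_MVM_QUEUE_READY once it is actually enabled for traffic, and may
 * later become SHARED (several TIDs), RECONFIGURING (unsharing pending)
 * or INACTIVE (timed out), as handled by the inactivity code earlier in
 * this file.
 */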
1630
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001631/*
1632 * In DQA mode, after a HW restart the queues should be allocated as before, in
1633 * order to avoid race conditions when there are shared queues. This function
1634 * does the re-mapping and queue allocation.
1635 *
1636 * Note that re-enabling aggregations isn't done in this function.
1637 */
1638static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1639 struct iwl_mvm_sta *mvm_sta)
1640{
1641 unsigned int wdg_timeout =
1642 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1643 int i;
1644 struct iwl_trans_txq_scd_cfg cfg = {
1645 .sta_id = mvm_sta->sta_id,
1646 .frame_limit = IWL_FRAME_LIMIT,
1647 };
1648
Johannes Berg03c902b2016-12-02 12:03:36 +01001649 /* Make sure reserved queue is still marked as such (if allocated) */
1650 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1651 mvm->queue_info[mvm_sta->reserved_queue].status =
1652 IWL_MVM_QUEUE_RESERVED;
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001653
1654 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1655 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1656 int txq_id = tid_data->txq_id;
1657 int ac;
1658 u8 mac_queue;
1659
Sara Sharon6862fce2017-02-22 19:34:17 +02001660 if (txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001661 continue;
1662
1663 skb_queue_head_init(&tid_data->deferred_tx_frames);
1664
1665 ac = tid_to_mac80211_ac[i];
1666 mac_queue = mvm_sta->vif->hw_queue[ac];
1667
Sara Sharon310181e2017-01-17 14:27:48 +02001668 if (iwl_mvm_has_new_tx_api(mvm)) {
1669 IWL_DEBUG_TX_QUEUES(mvm,
1670 "Re-mapping sta %d tid %d\n",
1671 mvm_sta->sta_id, i);
1672 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1673 mvm_sta->sta_id,
1674 i, wdg_timeout);
1675 tid_data->txq_id = txq_id;
Liad Kaufman5d390512017-10-17 16:26:00 +03001676
1677 /*
1678 * Since we don't set the seq number after reset, and HW
1679 * sets it now, FW reset will cause the seq num to start
1680			 * at 0 again, so the driver needs to update it internally
1681			 * as well to keep it in sync with the real value
1682 */
1683 tid_data->seq_number = 0;
Sara Sharon310181e2017-01-17 14:27:48 +02001684 } else {
1685 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001686
Sara Sharon310181e2017-01-17 14:27:48 +02001687 cfg.tid = i;
Emmanuel Grumbachcf6c6ea2017-06-13 13:18:48 +03001688 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
Sara Sharon310181e2017-01-17 14:27:48 +02001689 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1690 txq_id ==
1691 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001692
Sara Sharon310181e2017-01-17 14:27:48 +02001693 IWL_DEBUG_TX_QUEUES(mvm,
1694 "Re-mapping sta %d tid %d to queue %d\n",
1695 mvm_sta->sta_id, i, txq_id);
1696
1697 iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1698 wdg_timeout);
Sara Sharon34e10862017-02-23 13:15:07 +02001699 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
Sara Sharon310181e2017-01-17 14:27:48 +02001700 }
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001701 }
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001702}
1703
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001704static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1705 struct iwl_mvm_int_sta *sta,
1706 const u8 *addr,
1707 u16 mac_id, u16 color)
1708{
1709 struct iwl_mvm_add_sta_cmd cmd;
1710 int ret;
Luca Coelho3f497de2017-09-02 11:05:22 +03001711 u32 status = ADD_STA_SUCCESS;
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001712
1713 lockdep_assert_held(&mvm->mutex);
1714
1715 memset(&cmd, 0, sizeof(cmd));
1716 cmd.sta_id = sta->sta_id;
1717 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1718 color));
1719 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1720 cmd.station_type = sta->type;
1721
1722 if (!iwl_mvm_has_new_tx_api(mvm))
1723 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1724 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1725
1726 if (addr)
1727 memcpy(cmd.addr, addr, ETH_ALEN);
1728
1729 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1730 iwl_mvm_add_sta_cmd_size(mvm),
1731 &cmd, &status);
1732 if (ret)
1733 return ret;
1734
1735 switch (status & IWL_ADD_STA_STATUS_MASK) {
1736 case ADD_STA_SUCCESS:
1737 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1738 return 0;
1739 default:
1740 ret = -EIO;
1741 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1742 status);
1743 break;
1744 }
1745 return ret;
1746}
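
/*
 * Example (illustrative sketch): the status handling above is the
 * driver's usual "synchronous command with status" pattern - seed the
 * status word with the success value, send, then mask the FW verdict.
 * Reduced to its skeleton (the function name is made up; cmd/len stand
 * for any ADD_STA-sized command):
 */
static int __maybe_unused
iwl_mvm_example_cmd_status(struct iwl_mvm *mvm, const void *cmd, u16 len)
{
	u32 status = ADD_STA_SUCCESS;
	int ret;

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, len, cmd, &status);
	if (ret)
		return ret;	/* transport-level failure */

	/* only the low bits of the status word carry the FW verdict */
	return (status & IWL_ADD_STA_STATUS_MASK) == ADD_STA_SUCCESS ?
		0 : -EIO;
}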
1747
Johannes Berg8ca151b2013-01-24 14:25:36 +01001748int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1749 struct ieee80211_vif *vif,
1750 struct ieee80211_sta *sta)
1751{
1752 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001753 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharona571f5f2015-12-07 12:50:58 +02001754 struct iwl_mvm_rxq_dup_data *dup_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001755 int i, ret, sta_id;
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001756 bool sta_update = false;
1757 unsigned int sta_flags = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001758
1759 lockdep_assert_held(&mvm->mutex);
1760
1761 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
Eliad Pellerb92e6612014-01-23 17:58:23 +02001762 sta_id = iwl_mvm_find_free_sta_id(mvm,
1763 ieee80211_vif_type_p2p(vif));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001764 else
1765 sta_id = mvm_sta->sta_id;
1766
Sara Sharon0ae98812017-01-04 14:53:58 +02001767 if (sta_id == IWL_MVM_INVALID_STA)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001768 return -ENOSPC;
1769
1770 spin_lock_init(&mvm_sta->lock);
1771
Johannes Bergc8f54702017-06-19 23:50:31 +02001772 /* if this is a HW restart re-alloc existing queues */
1773 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001774 struct iwl_mvm_int_sta tmp_sta = {
1775 .sta_id = sta_id,
1776 .type = mvm_sta->sta_type,
1777 };
1778
1779 /*
1780 * First add an empty station since allocating
1781 * a queue requires a valid station
1782 */
1783 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1784 mvmvif->id, mvmvif->color);
1785 if (ret)
1786 goto err;
1787
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001788 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001789 sta_update = true;
1790 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001791 goto update_fw;
1792 }
1793
Johannes Berg8ca151b2013-01-24 14:25:36 +01001794 mvm_sta->sta_id = sta_id;
1795 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1796 mvmvif->color);
1797 mvm_sta->vif = vif;
Liad Kaufmana58bb462017-05-28 14:20:04 +03001798 if (!mvm->trans->cfg->gen2)
1799 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1800 else
1801 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001802 mvm_sta->tx_protection = 0;
1803 mvm_sta->tt_tx_protection = false;
Sara Sharonced19f22017-02-06 19:09:32 +02001804 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001805
1806 /* HW restart, don't assume the memory has been zeroed */
Liad Kaufman69191af2015-09-01 18:50:22 +03001807 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001808 mvm_sta->tfd_queue_msk = 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001809
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001810 /* for HW restart - reset everything but the sequence number */
Liad Kaufman24afba72015-07-28 18:56:08 +03001811 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001812 u16 seq = mvm_sta->tid_data[i].seq_number;
1813 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1814 mvm_sta->tid_data[i].seq_number = seq;
Liad Kaufman24afba72015-07-28 18:56:08 +03001815
Liad Kaufman24afba72015-07-28 18:56:08 +03001816 /*
1817 * Mark all queues for this STA as unallocated and defer TX
1818 * frames until the queue is allocated
1819 */
Sara Sharon6862fce2017-02-22 19:34:17 +02001820 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001821 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001822 }
Liad Kaufman24afba72015-07-28 18:56:08 +03001823 mvm_sta->deferred_traffic_tid_map = 0;
Eyal Shapiraefed6642014-09-14 15:58:53 +03001824 mvm_sta->agg_tids = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001825
Sara Sharona571f5f2015-12-07 12:50:58 +02001826 if (iwl_mvm_has_new_rx_api(mvm) &&
1827 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Johannes Berg92c4dca2017-06-07 10:35:54 +02001828 int q;
1829
Sara Sharona571f5f2015-12-07 12:50:58 +02001830 dup_data = kcalloc(mvm->trans->num_rx_queues,
Johannes Berg92c4dca2017-06-07 10:35:54 +02001831 sizeof(*dup_data), GFP_KERNEL);
Sara Sharona571f5f2015-12-07 12:50:58 +02001832 if (!dup_data)
1833 return -ENOMEM;
Johannes Berg92c4dca2017-06-07 10:35:54 +02001834 /*
1835 * Initialize all the last_seq values to 0xffff which can never
1836 * compare equal to the frame's seq_ctrl in the check in
1837 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1838 * number and fragmented packets don't reach that function.
1839 *
1840 * This thus allows receiving a packet with seqno 0 and the
1841 * retry bit set as the very first packet on a new TID.
1842 */
1843 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1844 memset(dup_data[q].last_seq, 0xff,
1845 sizeof(dup_data[q].last_seq));
Sara Sharona571f5f2015-12-07 12:50:58 +02001846 mvm_sta->dup_data = dup_data;
1847 }
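
	/*
	 * Example of the 0xffff trick above: seq_ctrl keeps the fragment
	 * number in its low 4 bits, and iwl_mvm_is_dup() only ever sees
	 * unfragmented frames, whose fragment bits are 0 - so a stored
	 * 0xffff (fragment bits 0xf) can never compare equal to any
	 * value the check receives.
	 */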
1848
Johannes Bergc8f54702017-06-19 23:50:31 +02001849 if (!iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufmand5216a22015-08-09 15:50:51 +03001850 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1851 ieee80211_vif_type_p2p(vif));
Liad Kaufman24afba72015-07-28 18:56:08 +03001852 if (ret)
1853 goto err;
1854 }
1855
Gregory Greenman9f66a392017-11-05 18:49:48 +02001856 /*
1857	 * If rs is registered with mac80211, then "add station" will be handled
1858	 * via the corresponding ops; otherwise we need to notify rate scaling here
1859 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02001860 if (iwl_mvm_has_tlc_offload(mvm))
Gregory Greenman9f66a392017-11-05 18:49:48 +02001861 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1862
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001863update_fw:
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001864 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001865 if (ret)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001866 goto err;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001867
Johannes Berg9e848012014-08-04 14:33:42 +02001868 if (vif->type == NL80211_IFTYPE_STATION) {
1869 if (!sta->tdls) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001870 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001871 mvmvif->ap_sta_id = sta_id;
1872 } else {
Sara Sharon0ae98812017-01-04 14:53:58 +02001873 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001874 }
1875 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001876
1877 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1878
1879 return 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001880
1881err:
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001882 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001883}
1884
1885int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1886 bool drain)
1887{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001888 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001889 int ret;
1890 u32 status;
1891
1892 lockdep_assert_held(&mvm->mutex);
1893
1894 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1895 cmd.sta_id = mvmsta->sta_id;
1896 cmd.add_modify = STA_MODE_MODIFY;
1897 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1898 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1899
1900 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001901 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1902 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001903 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001904 if (ret)
1905 return ret;
1906
Sara Sharon837c4da2016-01-07 16:50:45 +02001907 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001908 case ADD_STA_SUCCESS:
1909		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1910 mvmsta->sta_id);
1911 break;
1912 default:
1913 ret = -EIO;
1914 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1915 mvmsta->sta_id);
1916 break;
1917 }
1918
1919 return ret;
1920}
1921
1922/*
1923 * Remove a station from the FW table. Before sending the command to remove
1924 * the station, validate that the station is indeed known to the driver (sanity
1925 * only).
1926 */
1927static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1928{
1929 struct ieee80211_sta *sta;
1930 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1931 .sta_id = sta_id,
1932 };
1933 int ret;
1934
1935 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1936 lockdep_is_held(&mvm->mutex));
1937
1938 /* Note: internal stations are marked as error values */
1939 if (!sta) {
1940 IWL_ERR(mvm, "Invalid station id\n");
1941 return -EINVAL;
1942 }
1943
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001944 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001945 sizeof(rm_sta_cmd), &rm_sta_cmd);
1946 if (ret) {
1947 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1948 return ret;
1949 }
1950
1951 return 0;
1952}
1953
Liad Kaufman24afba72015-07-28 18:56:08 +03001954static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1955 struct ieee80211_vif *vif,
1956 struct iwl_mvm_sta *mvm_sta)
1957{
1958 int ac;
1959 int i;
1960
1961 lockdep_assert_held(&mvm->mutex);
1962
1963 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
Sara Sharon6862fce2017-02-22 19:34:17 +02001964 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman24afba72015-07-28 18:56:08 +03001965 continue;
1966
1967 ac = iwl_mvm_tid_to_ac_queue(i);
1968 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1969 vif->hw_queue[ac], i, 0);
Sara Sharon6862fce2017-02-22 19:34:17 +02001970 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001971 }
1972}
1973
Sara Sharond6d517b2017-03-06 10:16:11 +02001974int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1975 struct iwl_mvm_sta *mvm_sta)
1976{
Sharon Dvirbec95222017-06-12 11:40:33 +03001977 int i;
Sara Sharond6d517b2017-03-06 10:16:11 +02001978
1979 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1980 u16 txq_id;
Sharon Dvirbec95222017-06-12 11:40:33 +03001981 int ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001982
1983 spin_lock_bh(&mvm_sta->lock);
1984 txq_id = mvm_sta->tid_data[i].txq_id;
1985 spin_unlock_bh(&mvm_sta->lock);
1986
1987 if (txq_id == IWL_MVM_INVALID_QUEUE)
1988 continue;
1989
1990 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1991 if (ret)
Sharon Dvirbec95222017-06-12 11:40:33 +03001992 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001993 }
1994
Sharon Dvirbec95222017-06-12 11:40:33 +03001995 return 0;
Sara Sharond6d517b2017-03-06 10:16:11 +02001996}
1997
Johannes Berg8ca151b2013-01-24 14:25:36 +01001998int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1999 struct ieee80211_vif *vif,
2000 struct ieee80211_sta *sta)
2001{
2002 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002003 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02002004 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002005 int ret;
2006
2007 lockdep_assert_held(&mvm->mutex);
2008
Sara Sharona571f5f2015-12-07 12:50:58 +02002009 if (iwl_mvm_has_new_rx_api(mvm))
2010 kfree(mvm_sta->dup_data);
2011
Johannes Bergc8f54702017-06-19 23:50:31 +02002012 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
2013 if (ret)
2014 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02002015
Johannes Bergc8f54702017-06-19 23:50:31 +02002016 /* flush its queues here since we are freeing mvm_sta */
2017 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
2018 if (ret)
2019 return ret;
2020 if (iwl_mvm_has_new_tx_api(mvm)) {
2021 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
2022 } else {
2023 u32 q_mask = mvm_sta->tfd_queue_msk;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02002024
Johannes Bergc8f54702017-06-19 23:50:31 +02002025 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2026 q_mask);
2027 }
2028 if (ret)
2029 return ret;
Liad Kaufman56214742016-09-22 15:14:08 +03002030
Johannes Bergc8f54702017-06-19 23:50:31 +02002031 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002032
Johannes Bergc8f54702017-06-19 23:50:31 +02002033 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002034
Johannes Bergc8f54702017-06-19 23:50:31 +02002035 /* If there is a TXQ still marked as reserved - free it */
2036 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
2037 u8 reserved_txq = mvm_sta->reserved_queue;
2038 enum iwl_mvm_queue_status *status;
2039
2040 /*
2041 * If no traffic has gone through the reserved TXQ - it
2042 * is still marked as IWL_MVM_QUEUE_RESERVED, and
2043 * should be manually marked as free again
2044 */
2045 spin_lock_bh(&mvm->queue_info_lock);
2046 status = &mvm->queue_info[reserved_txq].status;
2047 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
2048 (*status != IWL_MVM_QUEUE_FREE),
2049 "sta_id %d reserved txq %d status %d",
2050 sta_id, reserved_txq, *status)) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002051 spin_unlock_bh(&mvm->queue_info_lock);
Johannes Bergc8f54702017-06-19 23:50:31 +02002052 return -EINVAL;
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002053 }
2054
Johannes Bergc8f54702017-06-19 23:50:31 +02002055 *status = IWL_MVM_QUEUE_FREE;
2056 spin_unlock_bh(&mvm->queue_info_lock);
2057 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002058
Johannes Bergc8f54702017-06-19 23:50:31 +02002059 if (vif->type == NL80211_IFTYPE_STATION &&
2060 mvmvif->ap_sta_id == sta_id) {
2061 /* if associated - we can't remove the AP STA now */
2062 if (vif->bss_conf.assoc)
2063 return ret;
Eliad Peller37577fe2013-12-05 17:19:39 +02002064
Johannes Bergc8f54702017-06-19 23:50:31 +02002065 /* unassoc - go ahead - remove the AP STA now */
2066 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
2067
2068 /* clear d0i3_ap_sta_id if no longer relevant */
2069 if (mvm->d0i3_ap_sta_id == sta_id)
2070 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002071 }
2072
2073 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03002074 * This shouldn't happen - the TDLS channel switch should be canceled
2075 * before the STA is removed.
2076 */
Sara Sharon94c3e612016-12-07 15:04:37 +02002077 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
Sara Sharon0ae98812017-01-04 14:53:58 +02002078 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03002079 cancel_delayed_work(&mvm->tdls_cs.dwork);
2080 }
2081
2082 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03002083 * Make sure that the tx response code sees the station as -EBUSY and
2084 * calls the drain worker.
2085 */
2086 spin_lock_bh(&mvm_sta->lock);
Johannes Bergc8f54702017-06-19 23:50:31 +02002087 spin_unlock_bh(&mvm_sta->lock);
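	/*
	 * The empty lock/unlock pair above acts as a barrier: any path
	 * that took mvm_sta->lock before this point has dropped it again,
	 * so no TX path can still be inside a locked section using this
	 * station when it is removed below.
	 */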
Sara Sharon94c3e612016-12-07 15:04:37 +02002088
Johannes Bergc8f54702017-06-19 23:50:31 +02002089 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
2090 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002091
2092 return ret;
2093}
2094
2095int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
2096 struct ieee80211_vif *vif,
2097 u8 sta_id)
2098{
2099 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
2100
2101 lockdep_assert_held(&mvm->mutex);
2102
Monam Agarwalc531c772014-03-24 00:05:56 +05302103 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002104 return ret;
2105}
2106
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002107int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2108 struct iwl_mvm_int_sta *sta,
Sara Sharonced19f22017-02-06 19:09:32 +02002109 u32 qmask, enum nl80211_iftype iftype,
2110 enum iwl_sta_type type)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002111{
Avraham Sterndf65c8d2018-03-06 14:10:49 +02002112 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2113 sta->sta_id == IWL_MVM_INVALID_STA) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02002114 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Sara Sharon0ae98812017-01-04 14:53:58 +02002115 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002116 return -ENOSPC;
2117 }
2118
2119 sta->tfd_queue_msk = qmask;
Sara Sharonced19f22017-02-06 19:09:32 +02002120 sta->type = type;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002121
2122 /* put a non-NULL value so iterating over the stations won't stop */
2123 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2124 return 0;
2125}
2126
Sara Sharon26d6c162017-01-03 12:00:19 +02002127void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002128{
Monam Agarwalc531c772014-03-24 00:05:56 +05302129 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002130 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
Sara Sharon0ae98812017-01-04 14:53:58 +02002131 sta->sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002132}
2133
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002134static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
2135 u8 sta_id, u8 fifo)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002136{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02002137 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
2138 mvm->cfg->base_params->wd_timeout :
2139 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002140
Sara Sharon310181e2017-01-17 14:27:48 +02002141 if (iwl_mvm_has_new_tx_api(mvm)) {
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002142 int tvqm_queue =
2143 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
2144 IWL_MAX_TID_COUNT,
2145 wdg_timeout);
2146 *queue = tvqm_queue;
Johannes Bergc8f54702017-06-19 23:50:31 +02002147 } else {
Liad Kaufman28d07932015-09-01 16:36:25 +03002148 struct iwl_trans_txq_scd_cfg cfg = {
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002149 .fifo = fifo,
2150 .sta_id = sta_id,
Liad Kaufman28d07932015-09-01 16:36:25 +03002151 .tid = IWL_MAX_TID_COUNT,
2152 .aggregate = false,
2153 .frame_limit = IWL_FRAME_LIMIT,
2154 };
2155
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002156 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
Liad Kaufman28d07932015-09-01 16:36:25 +03002157 }
Sara Sharonc5a719e2016-11-15 10:20:48 +02002158}
2159
2160int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2161{
2162 int ret;
2163
2164 lockdep_assert_held(&mvm->mutex);
2165
2166 /* Allocate aux station and assign to it the aux queue */
2167 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Sara Sharonced19f22017-02-06 19:09:32 +02002168 NL80211_IFTYPE_UNSPECIFIED,
2169 IWL_STA_AUX_ACTIVITY);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002170 if (ret)
2171 return ret;
2172
2173 /* Map Aux queue to fifo - needs to happen before adding Aux station */
2174 if (!iwl_mvm_has_new_tx_api(mvm))
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002175 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2176 mvm->aux_sta.sta_id,
2177 IWL_MVM_TX_FIFO_MCAST);
Liad Kaufman28d07932015-09-01 16:36:25 +03002178
Johannes Berg8ca151b2013-01-24 14:25:36 +01002179 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
2180 MAC_INDEX_AUX, 0);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002181 if (ret) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002182 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002183 return ret;
2184 }
2185
2186 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002187	 * For 22000 firmware and on we cannot add a queue to a station unknown
Sara Sharonc5a719e2016-11-15 10:20:48 +02002188	 * to the firmware, so enable the queue here - after the station was added
2189 */
2190 if (iwl_mvm_has_new_tx_api(mvm))
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002191 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2192 mvm->aux_sta.sta_id,
2193 IWL_MVM_TX_FIFO_MCAST);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002194
2195 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002196}
2197
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002198int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2199{
2200 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002201 int ret;
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002202
2203 lockdep_assert_held(&mvm->mutex);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002204
2205 /* Map snif queue to fifo - must happen before adding snif station */
2206 if (!iwl_mvm_has_new_tx_api(mvm))
2207 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2208 mvm->snif_sta.sta_id,
2209 IWL_MVM_TX_FIFO_BE);
2210
2211 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002212 mvmvif->id, 0);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002213 if (ret)
2214 return ret;
2215
2216 /*
2217	 * For 22000 firmware and on we cannot add a queue to a station unknown
2218	 * to the firmware, so enable the queue here - after the station was added
2219 */
2220 if (iwl_mvm_has_new_tx_api(mvm))
2221 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2222 mvm->snif_sta.sta_id,
2223 IWL_MVM_TX_FIFO_BE);
2224
2225 return 0;
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002226}
2227
2228int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2229{
2230 int ret;
2231
2232 lockdep_assert_held(&mvm->mutex);
2233
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002234 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
2235 IWL_MAX_TID_COUNT, 0);
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002236 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2237 if (ret)
2238 IWL_WARN(mvm, "Failed sending remove station\n");
2239
2240 return ret;
2241}
2242
2243void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2244{
2245 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2246}
2247
Johannes Berg712b24a2014-08-04 14:14:14 +02002248void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
2249{
2250 lockdep_assert_held(&mvm->mutex);
2251
2252 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2253}
2254
Johannes Berg8ca151b2013-01-24 14:25:36 +01002255/*
2256 * Send the add station command for the vif's broadcast station.
2257 * Assumes that the station was already allocated.
2258 *
2259 * @mvm: the mvm component
2260 * @vif: the interface to which the broadcast station is added
2262 */
Johannes Berg013290a2014-08-04 13:38:48 +02002263int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002264{
2265 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002266 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02002267 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01002268 const u8 *baddr = _baddr;
Johannes Berg7daa7622017-02-24 12:02:22 +01002269 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002270 int ret;
Sara Sharonc5a719e2016-11-15 10:20:48 +02002271 unsigned int wdg_timeout =
2272 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2273 struct iwl_trans_txq_scd_cfg cfg = {
2274 .fifo = IWL_MVM_TX_FIFO_VO,
2275 .sta_id = mvmvif->bcast_sta.sta_id,
2276 .tid = IWL_MAX_TID_COUNT,
2277 .aggregate = false,
2278 .frame_limit = IWL_FRAME_LIMIT,
2279 };
Johannes Berg8ca151b2013-01-24 14:25:36 +01002280
2281 lockdep_assert_held(&mvm->mutex);
2282
Johannes Bergc8f54702017-06-19 23:50:31 +02002283 if (!iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufman4d339982017-03-21 17:13:16 +02002284 if (vif->type == NL80211_IFTYPE_AP ||
2285 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon49f71712017-01-09 12:07:16 +02002286 queue = mvm->probe_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002287 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
Sara Sharon49f71712017-01-09 12:07:16 +02002288 queue = mvm->p2p_dev_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002289 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
Liad Kaufmande24f632015-08-04 15:19:18 +03002290 return -EINVAL;
2291
Liad Kaufmandf88c082016-11-24 15:31:00 +02002292 bsta->tfd_queue_msk |= BIT(queue);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002293
Sara Sharon310181e2017-01-17 14:27:48 +02002294 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
2295 &cfg, wdg_timeout);
Liad Kaufmande24f632015-08-04 15:19:18 +03002296 }
2297
Johannes Berg5023d962013-07-31 14:07:43 +02002298 if (vif->type == NL80211_IFTYPE_ADHOC)
2299 baddr = vif->bss_conf.bssid;
2300
Sara Sharon0ae98812017-01-04 14:53:58 +02002301 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002302 return -ENOSPC;
2303
Liad Kaufmandf88c082016-11-24 15:31:00 +02002304 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2305 mvmvif->id, mvmvif->color);
2306 if (ret)
2307 return ret;
2308
2309 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002310	 * For 22000 firmware and on we cannot add a queue to a station unknown
Sara Sharonc5a719e2016-11-15 10:20:48 +02002311	 * to the firmware, so enable the queue here - after the station was added
Liad Kaufmandf88c082016-11-24 15:31:00 +02002312 */
Sara Sharon310181e2017-01-17 14:27:48 +02002313 if (iwl_mvm_has_new_tx_api(mvm)) {
Johannes Berg7daa7622017-02-24 12:02:22 +01002314 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
2315 bsta->sta_id,
2316 IWL_MAX_TID_COUNT,
2317 wdg_timeout);
2318
Luca Coelho7b758a12017-06-20 13:40:03 +03002319 if (vif->type == NL80211_IFTYPE_AP ||
2320 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon310181e2017-01-17 14:27:48 +02002321 mvm->probe_queue = queue;
2322 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2323 mvm->p2p_dev_queue = queue;
Sara Sharon310181e2017-01-17 14:27:48 +02002324 }
Liad Kaufmandf88c082016-11-24 15:31:00 +02002325
2326 return 0;
2327}
2328
2329static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2330 struct ieee80211_vif *vif)
2331{
2332 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002333 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002334
2335 lockdep_assert_held(&mvm->mutex);
2336
Sara Sharond49394a2017-03-05 13:01:08 +02002337 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2338
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002339 switch (vif->type) {
2340 case NL80211_IFTYPE_AP:
2341 case NL80211_IFTYPE_ADHOC:
2342 queue = mvm->probe_queue;
2343 break;
2344 case NL80211_IFTYPE_P2P_DEVICE:
2345 queue = mvm->p2p_dev_queue;
2346 break;
2347 default:
2348 WARN(1, "Can't free bcast queue on vif type %d\n",
2349 vif->type);
2350 return;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002351 }
2352
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002353 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
2354 if (iwl_mvm_has_new_tx_api(mvm))
2355 return;
2356
2357 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2358 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002359}
2360
2361/* Send the FW a request to remove the station from its internal data
2362 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02002363int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002364{
Johannes Berg013290a2014-08-04 13:38:48 +02002365 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002366 int ret;
2367
2368 lockdep_assert_held(&mvm->mutex);
2369
Johannes Bergc8f54702017-06-19 23:50:31 +02002370 iwl_mvm_free_bcast_sta_queues(mvm, vif);
Liad Kaufmandf88c082016-11-24 15:31:00 +02002371
Johannes Berg013290a2014-08-04 13:38:48 +02002372 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002373 if (ret)
2374 IWL_WARN(mvm, "Failed sending remove station\n");
2375 return ret;
2376}
2377
Johannes Berg013290a2014-08-04 13:38:48 +02002378int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2379{
2380 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002381
2382 lockdep_assert_held(&mvm->mutex);
2383
Johannes Bergc8f54702017-06-19 23:50:31 +02002384 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
Sara Sharonced19f22017-02-06 19:09:32 +02002385 ieee80211_vif_type_p2p(vif),
2386 IWL_STA_GENERAL_PURPOSE);
Johannes Berg013290a2014-08-04 13:38:48 +02002387}
2388
Johannes Berg8ca151b2013-01-24 14:25:36 +01002389/* Allocate a new station entry for the broadcast station to the given vif,
2390 * and send it to the FW.
2391 * Note that each P2P mac should have its own broadcast station.
2392 *
2393 * @mvm: the mvm component
2394 * @vif: the interface to which the broadcast station is added
2395 */
Luca Coelhod1973582017-06-22 16:00:25 +03002396int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002397{
2398 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002399 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002400 int ret;
2401
2402 lockdep_assert_held(&mvm->mutex);
2403
Johannes Berg013290a2014-08-04 13:38:48 +02002404 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002405 if (ret)
2406 return ret;
2407
Johannes Berg013290a2014-08-04 13:38:48 +02002408 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002409
2410 if (ret)
2411 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02002412
Johannes Berg8ca151b2013-01-24 14:25:36 +01002413 return ret;
2414}
2415
Johannes Berg013290a2014-08-04 13:38:48 +02002416void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2417{
2418 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2419
2420 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2421}
2422
Johannes Berg8ca151b2013-01-24 14:25:36 +01002423/*
2424 * Send the FW a request to remove the station from its internal data
2425 * structures, and in addition remove it from the local data structure.
2426 */
Luca Coelhod1973582017-06-22 16:00:25 +03002427int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002428{
2429 int ret;
2430
2431 lockdep_assert_held(&mvm->mutex);
2432
Johannes Berg013290a2014-08-04 13:38:48 +02002433 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002434
Johannes Berg013290a2014-08-04 13:38:48 +02002435 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2436
Johannes Berg8ca151b2013-01-24 14:25:36 +01002437 return ret;
2438}
2439
Sara Sharon26d6c162017-01-03 12:00:19 +02002440/*
2441 * Allocate a new station entry for the multicast station to the given vif,
2442 * and send it to the FW.
2443 * Note that each AP/GO mac should have its own multicast station.
2444 *
2445 * @mvm: the mvm component
2446 * @vif: the interface to which the multicast station is added
2447 */
2448int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2449{
2450 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2451 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2452 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2453 const u8 *maddr = _maddr;
2454 struct iwl_trans_txq_scd_cfg cfg = {
2455 .fifo = IWL_MVM_TX_FIFO_MCAST,
2456 .sta_id = msta->sta_id,
Ilan Peer6508de02018-01-25 15:22:41 +02002457 .tid = 0,
Sara Sharon26d6c162017-01-03 12:00:19 +02002458 .aggregate = false,
2459 .frame_limit = IWL_FRAME_LIMIT,
2460 };
2461 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2462 int ret;
2463
2464 lockdep_assert_held(&mvm->mutex);
2465
Liad Kaufmanee48b722017-03-21 17:13:16 +02002466 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2467 vif->type != NL80211_IFTYPE_ADHOC))
Sara Sharon26d6c162017-01-03 12:00:19 +02002468 return -ENOTSUPP;
2469
Sara Sharonced19f22017-02-06 19:09:32 +02002470 /*
Sara Sharonfc07bd82017-12-21 15:05:28 +02002471 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2472 * invalid, so make sure we use the queue we want.
2473 * Note that this is done here as we want to avoid making DQA
2474	 * changes in the mac80211 layer.
2475 */
2476 if (vif->type == NL80211_IFTYPE_ADHOC) {
2477 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2478 mvmvif->cab_queue = vif->cab_queue;
2479 }
2480
2481 /*
Sara Sharonced19f22017-02-06 19:09:32 +02002482	 * While in previous FWs we had to exclude the cab queue from the TFD
2483	 * queue mask, now it is needed like any other queue.
2484 */
2485 if (!iwl_mvm_has_new_tx_api(mvm) &&
2486 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2487 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2488 &cfg, timeout);
2489 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2490 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002491 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2492 mvmvif->id, mvmvif->color);
2493 if (ret) {
2494 iwl_mvm_dealloc_int_sta(mvm, msta);
2495 return ret;
2496 }
2497
2498 /*
2499 * Enable cab queue after the ADD_STA command is sent.
Luca Coelho2f7a3862017-11-15 15:07:34 +02002500	 * This is needed for 22000 firmware, which won't accept an SCD_QUEUE_CFG
Sara Sharonced19f22017-02-06 19:09:32 +02002501	 * command with an unknown station id, and for FW that doesn't support
2502 * station API since the cab queue is not included in the
2503 * tfd_queue_mask.
Sara Sharon26d6c162017-01-03 12:00:19 +02002504 */
Sara Sharon310181e2017-01-17 14:27:48 +02002505 if (iwl_mvm_has_new_tx_api(mvm)) {
2506 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2507 msta->sta_id,
Ilan Peer6508de02018-01-25 15:22:41 +02002508 0,
Sara Sharon310181e2017-01-17 14:27:48 +02002509 timeout);
Sara Sharone2af3fa2017-02-22 19:35:10 +02002510 mvmvif->cab_queue = queue;
Sara Sharonced19f22017-02-06 19:09:32 +02002511 } else if (!fw_has_api(&mvm->fw->ucode_capa,
Sara Sharonfc07bd82017-12-21 15:05:28 +02002512 IWL_UCODE_TLV_API_STA_TYPE))
Sara Sharon310181e2017-01-17 14:27:48 +02002513 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2514 &cfg, timeout);
Sara Sharon26d6c162017-01-03 12:00:19 +02002515
Avraham Stern337bfc92018-06-04 15:10:18 +03002516 if (mvmvif->ap_wep_key) {
2517 u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
2518
2519 if (key_offset == STA_KEY_IDX_INVALID)
2520 return -ENOSPC;
2521
2522 ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
2523 mvmvif->ap_wep_key, 1, 0, NULL, 0,
2524 key_offset, 0);
2525 if (ret)
2526 return ret;
2527 }
2528
Sara Sharon26d6c162017-01-03 12:00:19 +02002529 return 0;
2530}
2531
2532/*
2533 * Send the FW a request to remove the station from its internal data
2534 * structures, and in addition remove it from the local data structure.
2535 */
2536int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2537{
2538 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2539 int ret;
2540
2541 lockdep_assert_held(&mvm->mutex);
2542
Sara Sharond49394a2017-03-05 13:01:08 +02002543 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2544
Sara Sharone2af3fa2017-02-22 19:35:10 +02002545 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
Ilan Peer6508de02018-01-25 15:22:41 +02002546 0, 0);
Sara Sharon26d6c162017-01-03 12:00:19 +02002547
2548 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2549 if (ret)
2550 IWL_WARN(mvm, "Failed sending remove station\n");
2551
2552 return ret;
2553}
2554
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002555#define IWL_MAX_RX_BA_SESSIONS 16
2556
Sara Sharonb915c102016-03-23 16:32:02 +02002557static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02002558{
Sara Sharonb915c102016-03-23 16:32:02 +02002559 struct iwl_mvm_delba_notif notif = {
2560 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2561 .metadata.sync = 1,
2562 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02002563 };
Sara Sharonb915c102016-03-23 16:32:02 +02002564 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2565}
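
/*
 * The .sync = 1 flag above makes iwl_mvm_sync_rx_queues_internal() wait
 * until every RX queue has processed the notification; that is what
 * makes it safe for callers to tear down the reorder buffer afterwards.
 */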
Sara Sharon10b2b202016-03-20 16:23:41 +02002566
Sara Sharonb915c102016-03-23 16:32:02 +02002567static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2568 struct iwl_mvm_baid_data *data)
2569{
2570 int i;
2571
2572 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2573
2574 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2575 int j;
2576 struct iwl_mvm_reorder_buffer *reorder_buf =
2577 &data->reorder_buf[i];
Johannes Bergdfdddd92017-09-26 12:24:51 +02002578 struct iwl_mvm_reorder_buf_entry *entries =
2579 &data->entries[i * data->entries_per_queue];
Sara Sharonb915c102016-03-23 16:32:02 +02002580
Sara Sharon06904052016-02-28 20:28:17 +02002581 spin_lock_bh(&reorder_buf->lock);
2582 if (likely(!reorder_buf->num_stored)) {
2583 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02002584 continue;
Sara Sharon06904052016-02-28 20:28:17 +02002585 }
Sara Sharonb915c102016-03-23 16:32:02 +02002586
2587 /*
2588 * This shouldn't happen in regular DELBA since the internal
2589 * delBA notification should trigger a release of all frames in
2590 * the reorder buffer.
2591 */
2592 WARN_ON(1);
2593
2594 for (j = 0; j < reorder_buf->buf_size; j++)
Johannes Bergdfdddd92017-09-26 12:24:51 +02002595 __skb_queue_purge(&entries[j].e.frames);
Sara Sharon06904052016-02-28 20:28:17 +02002596 /*
2597		 * Prevent timer re-arm. This prevents a very far-fetched case
2598 * where we timed out on the notification. There may be prior
2599 * RX frames pending in the RX queue before the notification
2600 * that might get processed between now and the actual deletion
2601 * and we would re-arm the timer although we are deleting the
2602 * reorder buffer.
2603 */
2604 reorder_buf->removed = true;
2605 spin_unlock_bh(&reorder_buf->lock);
2606 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02002607 }
2608}
2609
2610static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
Sara Sharonb915c102016-03-23 16:32:02 +02002611 struct iwl_mvm_baid_data *data,
Luca Coelho514c30692018-06-24 11:59:54 +03002612 u16 ssn, u16 buf_size)
Sara Sharonb915c102016-03-23 16:32:02 +02002613{
2614 int i;
2615
2616 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2617 struct iwl_mvm_reorder_buffer *reorder_buf =
2618 &data->reorder_buf[i];
Johannes Bergdfdddd92017-09-26 12:24:51 +02002619 struct iwl_mvm_reorder_buf_entry *entries =
2620 &data->entries[i * data->entries_per_queue];
Sara Sharonb915c102016-03-23 16:32:02 +02002621 int j;
2622
2623 reorder_buf->num_stored = 0;
2624 reorder_buf->head_sn = ssn;
2625 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02002626 /* rx reorder timer */
Kees Cook8cef5342017-10-24 02:29:37 -07002627 timer_setup(&reorder_buf->reorder_timer,
2628 iwl_mvm_reorder_timer_expired, 0);
Sara Sharon06904052016-02-28 20:28:17 +02002629 spin_lock_init(&reorder_buf->lock);
2630 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02002631 reorder_buf->queue = i;
Sara Sharon5d43eab2017-02-02 12:51:39 +02002632 reorder_buf->valid = false;
Sara Sharonb915c102016-03-23 16:32:02 +02002633 for (j = 0; j < reorder_buf->buf_size; j++)
Johannes Bergdfdddd92017-09-26 12:24:51 +02002634 __skb_queue_head_init(&entries[j].e.frames);
Sara Sharonb915c102016-03-23 16:32:02 +02002635 }
Sara Sharon10b2b202016-03-20 16:23:41 +02002636}
2637
Johannes Berg8ca151b2013-01-24 14:25:36 +01002638int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Luca Coelho514c30692018-06-24 11:59:54 +03002639 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002640{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002641 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002642 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002643 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002644 int ret;
2645 u32 status;
2646
2647 lockdep_assert_held(&mvm->mutex);
2648
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002649 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2650 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2651 return -ENOSPC;
2652 }
2653
Sara Sharon10b2b202016-03-20 16:23:41 +02002654 if (iwl_mvm_has_new_rx_api(mvm) && start) {
Johannes Bergdfdddd92017-09-26 12:24:51 +02002655 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2656
2657 /* sparse doesn't like the __align() so don't check */
2658#ifndef __CHECKER__
2659 /*
2660 * The division below will be OK if either the cache line size
2661		 * can be divided by the entry size (ALIGN will round up) or
2662		 * if the entry size can be divided by the cache line size, in
2663 * which case the ALIGN() will do nothing.
2664 */
2665 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2666 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2667#endif
2668
2669 /*
2670 * Upward align the reorder buffer size to fill an entire cache
2671 * line for each queue, to avoid sharing cache lines between
2672 * different queues.
2673 */
2674 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
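		/*
		 * Worked example (hypothetical sizes): with 32-byte entries
		 * and buf_size = 9, reorder_buf_size is 288 and ALIGN()
		 * rounds it up to 320 - five full 64-byte cache lines - so
		 * the division below gives entries_per_queue = 10 and each
		 * queue's slice of the entries array starts on a cache line
		 * of its own.
		 */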
2675
Sara Sharon10b2b202016-03-20 16:23:41 +02002676 /*
2677 * Allocate here so if allocation fails we can bail out early
2678 * before starting the BA session in the firmware
2679 */
Sara Sharonb915c102016-03-23 16:32:02 +02002680 baid_data = kzalloc(sizeof(*baid_data) +
2681 mvm->trans->num_rx_queues *
Johannes Bergdfdddd92017-09-26 12:24:51 +02002682 reorder_buf_size,
Sara Sharonb915c102016-03-23 16:32:02 +02002683 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002684 if (!baid_data)
2685 return -ENOMEM;
Johannes Bergdfdddd92017-09-26 12:24:51 +02002686
2687 /*
2688 * This division is why we need the above BUILD_BUG_ON(),
2689 * if that doesn't hold then this will not be right.
2690 */
2691 baid_data->entries_per_queue =
2692 reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));
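
		/*
		 * The timer is armed for twice the negotiated timeout (in
		 * TUs); the expiry handler is expected to re-check ->last_rx
		 * and re-arm itself when frames arrived in the meantime, so
		 * a live session isn't torn down early.
		 */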

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will time out (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 IWL_MVM_DQA_MIN_DATA_QUEUE,
						 IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;
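
	/*
	 * Illustration: if ssn is 0x100 while next_reclaimed has wrapped
	 * to 0, the two only compare equal once ssn is masked down to the
	 * 8 bits the hardware tracks.
	 */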

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to the firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from the current one (i.e. become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it must first be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
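
	/*
	 * For instance, if an earlier session negotiated a 64-frame window
	 * and this one negotiates 32, the per-station firmware limit drops
	 * to 32 for all of this station's sessions.
	 */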

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
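
	/*
	 * Clearing ->txq_id above means a later aggregation start on this
	 * TID goes through queue allocation again instead of reusing a
	 * queue that was just handed back to the pool.
	 */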

	spin_unlock_bh(&mvm->queue_info_lock);
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}
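
	/*
	 * Illustration: with unused offsets whose 'deleted' counters are
	 * {3, 0, 7}, the offset with counter 7 was deleted longest ago
	 * and is the one picked.
	 */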

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;
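
	/*
	 * Both command versions share the same leading layout, so the
	 * common fields can be written through either union member.
	 */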

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
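		/*
		 * mac80211 reports the IGTK PN big-endian (pn[0] is the most
		 * significant byte), so the shifts above rebuild the 48-bit
		 * counter before the little-endian conversion for the
		 * firmware.
		 */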
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
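
	/*
	 * E.g. tids == 0x82 (TIDs 1 and 7) sets the BK and VO bits in
	 * awake_acs via tid_to_ucode_ac[].
	 */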

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period allows, in which case
	 *    more_data needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003939
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

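/*
 * Illustrative note (not from the original file): the helper above keeps
 * three views of the "quiet" state consistent under the station spinlock:
 * the driver's own disable_tx flag, mac80211's buffering of frames for the
 * station (ieee80211_sta_block_awake()), and the firmware's
 * STA_FLG_DISABLE_TX station flag.  A hedged usage sketch, assuming sta
 * was looked up under RCU or with mvm->mutex held:
 *
 *	iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, true);   // quiet client
 *	...
 *	iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);  // resume Tx
 */
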
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* The multicast station needs to be blocked/unblocked as well */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (the FW blocks it for
	 * immediate quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

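/*
 * Illustrative sketch (an assumption about the callers, not taken from
 * this file): during a channel switch on an AP/GO interface, the driver
 * quiets every station of the vif before the switch and releases them
 * once the switch has completed, roughly:
 *
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);   // pre-CSA
 *	... perform the channel switch ...
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);  // post-CSA
 *
 * Both calls must run with mvm->mutex held, as enforced by the
 * lockdep_assert_held() above.
 */
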
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW the next_reclaimed index is only 8 bits wide, so we
	 * need to align the wrap-around of the ssn in order to compare the
	 * relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
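
/*
 * Illustrative example (not from the original file): a non-wrapping case
 * of the arithmetic above on gen2 hardware.  With seq_number = 0x12a0,
 * IEEE80211_SEQ_TO_SN() yields sn = 0x12a, which the 8-bit mask reduces
 * to 0x2a.  If next_reclaimed = 0x20, then
 * ieee80211_sn_sub(0x2a, 0x20) = 0x0a, i.e. 10 frames are still queued
 * on this TID.
 */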