/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

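/*
 * A sketch of the intended calling pattern for the size helper above,
 * taken from the ADD_STA senders later in this file: the command struct
 * is always built using the newest layout, but only the prefix that the
 * running firmware understands is actually sent.
 *
 *	status = ADD_STA_SUCCESS;
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *					  iwl_mvm_add_sta_cmd_size(mvm),
 *					  &cmd, &status);
 */
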
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

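/*
 * Timer callback for RX BA (aggregation) sessions: as long as frames were
 * received within twice the session timeout, the timer is simply re-armed;
 * once it really expires, mac80211 is notified so it can tear the BA
 * session down.
 */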
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

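/*
 * Free a TXQ that is no longer needed. On the new (TVQM) TX path the queue
 * is simply released back to the transport; on the older SCD path the TID
 * is unmapped first, and the SCD_QUEUE_CFG disable command is only sent
 * once no TIDs remain mapped to the queue.
 */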
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
	int ret;

	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
		return -EINVAL;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);

		if (remove_mac_queue)
			mvm->hw_queue_to_mac80211[queue] &=
				~BIT(mac80211_queue);

		spin_unlock_bh(&mvm->queue_info_lock);

		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC queue
	 * from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->hw_queue_to_mac80211[queue] &=
			~BIT(mac80211_queue);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap ||
	     mvm->hw_queue_to_mac80211[queue],
	     "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
	     queue, mvm->hw_queue_to_mac80211[queue],
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->hw_queue_to_mac80211[queue] = 0;

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

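/*
 * Returns a bitmap of the TIDs mapped to @queue that currently have an
 * active aggregation (BA) session, i.e. those in IWL_AGG_ON state.
 */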
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

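/*
 * Reclaim an inactive queue: tear down any aggregations still marked on
 * it, disable it, and - if it belonged to a station other than
 * @new_sta_id - tell the firmware the queue was removed from that
 * station's set of queues.
 */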
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
				      int ac, int ssn, unsigned int wdg_timeout,
				      bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

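/*
 * Scan [minq, maxq] for a HW queue that has no TIDs mapped to it and is
 * marked free in the driver's bookkeeping; returns its index, or -ENOSPC
 * if none is available.
 */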
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = IWL_DEFAULT_QUEUE_SIZE;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = IWL_MGMT_QUEUE_SIZE;
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
			    queue, mvm->hw_queue_to_mac80211[queue]);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

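/*
 * Record a (sta, tid, mac80211 queue) -> HW queue mapping in the driver's
 * bookkeeping. Returns true if this was the first TID mapped to the queue,
 * i.e. the caller still needs to send the SCD enable command to the
 * firmware.
 */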
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
				       int mac80211_queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
		WARN(mac80211_queue >=
		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
		     mac80211_queue, queue, sta_id, tid);
		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	}

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue, mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	spin_unlock_bh(&mvm->queue_info_lock);

	return enable_queue;
}

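/*
 * Enable an SCD TX queue for the given config. The boolean return value
 * reports whether the transport incremented the SSN when enabling the
 * queue; in that case the caller must also bump the sequence number of
 * the frame about to be sent (see iwl_mvm_sta_alloc_queue() below).
 */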
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
					cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

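/*
 * Re-assign ownership of @queue to one of the TIDs still mapped to it by
 * sending SCD_CFG_UPDATE_QUEUE_TID, keeping the firmware's notion of the
 * queue's owner TID in sync after the previous owner was removed.
 */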
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

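/*
 * Turn a previously shared queue back into a single-TID queue: redirect
 * it to the AC of the one remaining TID and, if that TID had an
 * aggregation session, re-enable TX aggregation for it.
 */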
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvm->hw_queue_to_mac80211[queue] |=
			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
	}

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	spin_lock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		/* this isn't so nice, but works OK due to the way we loop */
		spin_unlock(&mvm->queue_info_lock);

		/* and we need this locking order */
		spin_lock(&mvmsta->lock);
		spin_lock(&mvm->queue_info_lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret >= 0 && free_queue < 0)
			free_queue = ret;
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock(&mvmsta->lock);
	}

	rcu_read_unlock();
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

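/*
 * Allocate a TX queue for a station/TID pair. The search order below is:
 * a MGMT queue for non-QoS/QoS-NDP frames, then the station's reserved
 * queue, then any free DATA queue, then a queue freed by the inactivity
 * check, and finally - as a last resort - sharing an existing queue.
 */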
Johannes Bergc20e08b2018-07-04 22:59:52 +02001268static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1269 struct ieee80211_sta *sta, u8 ac, int tid,
1270 struct ieee80211_hdr *hdr)
1271{
1272 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1273 struct iwl_trans_txq_scd_cfg cfg = {
1274 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1275 .sta_id = mvmsta->sta_id,
1276 .tid = tid,
1277 .frame_limit = IWL_FRAME_LIMIT,
1278 };
1279 unsigned int wdg_timeout =
1280 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1281 u8 mac_queue = mvmsta->vif->hw_queue[ac];
1282 int queue = -1;
Johannes Bergc20e08b2018-07-04 22:59:52 +02001283 unsigned long disable_agg_tids = 0;
1284 enum iwl_mvm_agg_state queue_state;
1285 bool shared_queue = false, inc_ssn;
1286 int ssn;
1287 unsigned long tfd_queue_mask;
1288 int ret;
1289
1290 lockdep_assert_held(&mvm->mutex);
1291
1292 if (iwl_mvm_has_new_tx_api(mvm))
1293 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1294
1295 spin_lock_bh(&mvmsta->lock);
1296 tfd_queue_mask = mvmsta->tfd_queue_msk;
1297 spin_unlock_bh(&mvmsta->lock);
1298
1299 spin_lock_bh(&mvm->queue_info_lock);
1300
1301 /*
1302 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
1303 * exists
1304 */
1305 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1306 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1307 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1308 IWL_MVM_DQA_MIN_MGMT_QUEUE,
1309 IWL_MVM_DQA_MAX_MGMT_QUEUE);
1310 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1311 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1312 queue);
1313
1314 /* If no such queue is found, we'll use a DATA queue instead */
1315 }
1316
1317 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1318 (mvm->queue_info[mvmsta->reserved_queue].status ==
Johannes Berg724fe772018-07-04 23:02:14 +02001319 IWL_MVM_QUEUE_RESERVED)) {
Johannes Bergc20e08b2018-07-04 22:59:52 +02001320 queue = mvmsta->reserved_queue;
1321 mvm->queue_info[queue].reserved = true;
1322 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1323 }
1324
1325 if (queue < 0)
1326 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1327 IWL_MVM_DQA_MIN_DATA_QUEUE,
1328 IWL_MVM_DQA_MAX_DATA_QUEUE);
Johannes Berg724fe772018-07-04 23:02:14 +02001329 if (queue < 0) {
1330 spin_unlock_bh(&mvm->queue_info_lock);
Johannes Bergc20e08b2018-07-04 22:59:52 +02001331
Johannes Berg724fe772018-07-04 23:02:14 +02001332 /* try harder - perhaps kill an inactive queue */
1333 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1334
1335 spin_lock_bh(&mvm->queue_info_lock);
Johannes Bergc20e08b2018-07-04 22:59:52 +02001336 }
1337
1338 /* No free queue - we'll have to share */
1339 if (queue <= 0) {
1340 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1341 if (queue > 0) {
1342 shared_queue = true;
1343 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1344 }
1345 }
1346
1347 /*
1348 * Mark TXQ as ready, even though it hasn't been fully configured yet,
1349 * to make sure no one else takes it.
1350 * This will allow avoiding re-acquiring the lock at the end of the
1351 * configuration. On error we'll mark it back as free.
1352 */
1353 if (queue > 0 && !shared_queue)
1354 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1355
1356 spin_unlock_bh(&mvm->queue_info_lock);
1357
1358 /* This shouldn't happen - out of queues */
1359 if (WARN_ON(queue <= 0)) {
1360 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1361 tid, cfg.sta_id);
1362 return queue;
1363 }
1364
1365 /*
1366 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
1367 * but for configuring the SCD to send A-MPDUs we need to mark the queue
1368 * as aggregatable.
1369 * Mark all DATA queues as allowing to be aggregated at some point
1370 */
1371 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1372 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1373
Johannes Bergc20e08b2018-07-04 22:59:52 +02001374 IWL_DEBUG_TX_QUEUES(mvm,
1375 "Allocating %squeue #%d to sta %d on tid %d\n",
1376 shared_queue ? "shared " : "", queue,
1377 mvmsta->sta_id, tid);
1378
1379 if (shared_queue) {
1380 /* Disable any open aggs on this queue */
1381 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1382
1383 if (disable_agg_tids) {
1384 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1385 queue);
1386 iwl_mvm_invalidate_sta_queue(mvm, queue,
1387 disable_agg_tids, false);
1388 }
1389 }
1390
1391 ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1392 inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
1393 ssn, &cfg, wdg_timeout);
1394 if (inc_ssn) {
1395 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1396 le16_add_cpu(&hdr->seq_ctrl, 0x10);
1397 }
1398
1399 /*
1400 * Mark queue as shared in transport if shared
1401 * Note this has to be done after queue enablement because enablement
1402 * can also set this value, and there is no indication there to shared
1403 * queues
1404 */
1405 if (shared_queue)
1406 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1407
1408 spin_lock_bh(&mvmsta->lock);
1409 /*
1410 * This looks racy, but it is not. We have only one packet for
1411 * this ra/tid in our Tx path since we stop the Qdisc when we
1412 * need to allocate a new TFD queue.
1413 */
1414 if (inc_ssn)
1415 mvmsta->tid_data[tid].seq_number += 0x10;
1416 mvmsta->tid_data[tid].txq_id = queue;
Johannes Bergc20e08b2018-07-04 22:59:52 +02001417 mvmsta->tfd_queue_msk |= BIT(queue);
1418 queue_state = mvmsta->tid_data[tid].state;
1419
1420 if (mvmsta->reserved_queue == queue)
1421 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1422 spin_unlock_bh(&mvmsta->lock);
1423
1424 if (!shared_queue) {
1425 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1426 if (ret)
1427 goto out_err;
1428
1429 /* If we need to re-enable aggregations... */
1430 if (queue_state == IWL_AGG_ON) {
1431 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1432 if (ret)
1433 goto out_err;
1434 }
1435 } else {
1436 /* Redirect queue, if needed */
1437 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
1438 wdg_timeout, false);
1439 if (ret)
1440 goto out_err;
1441 }
1442
1443 return 0;
1444
1445out_err:
1446 iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
1447
1448 return ret;
1449}
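
/*
 * Illustrative sketch (documentation only, not driver code): the
 * queue-selection fallback order that iwl_mvm_sta_alloc_queue() above
 * implements.  try_reserved()/try_free()/try_inactive()/try_shared()
 * are hypothetical stand-ins for the code paths above.
 */
#if 0
static int dqa_alloc_queue_sketch(void)
{
	int queue = try_reserved();		/* 1. the STA's reserved queue */

	if (queue < 0)
		queue = try_free();		/* 2. any free DATA queue */
	if (queue < 0)
		queue = try_inactive();		/* 3. reclaim an inactive queue */
	if (queue <= 0)
		queue = try_shared();		/* 4. share an existing queue */

	return queue;				/* <= 0 means out of queues */
}
#endif
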
1450
Liad Kaufman24afba72015-07-28 18:56:08 +03001451static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1452{
1453 if (tid == IWL_MAX_TID_COUNT)
1454 return IEEE80211_AC_VO; /* MGMT */
1455
1456 return tid_to_mac80211_ac[tid];
1457}
1458
1459static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1460 struct ieee80211_sta *sta, int tid)
1461{
1462 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1463 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1464 struct sk_buff *skb;
1465 struct ieee80211_hdr *hdr;
1466 struct sk_buff_head deferred_tx;
1467 u8 mac_queue;
1468 bool no_queue = false; /* Marks if there is a problem with the queue */
1469 u8 ac;
1470
1471 lockdep_assert_held(&mvm->mutex);
1472
1473 skb = skb_peek(&tid_data->deferred_tx_frames);
1474 if (!skb)
1475 return;
1476 hdr = (void *)skb->data;
1477
1478 ac = iwl_mvm_tid_to_ac_queue(tid);
1479 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1480
Sara Sharon6862fce2017-02-22 19:34:17 +02001481 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
Liad Kaufman24afba72015-07-28 18:56:08 +03001482 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1483 IWL_ERR(mvm,
1484 "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1485 mvmsta->sta_id, tid);
1486
1487 /*
1488	 * Mark the queue as problematic so the deferred traffic is
1489	 * freed later, as we can do nothing with it
1490 */
1491 no_queue = true;
1492 }
1493
1494 __skb_queue_head_init(&deferred_tx);
1495
Liad Kaufmand2515a92016-03-23 16:31:08 +02001496 /* Disable bottom-halves when entering TX path */
1497 local_bh_disable();
Liad Kaufman24afba72015-07-28 18:56:08 +03001498 spin_lock(&mvmsta->lock);
1499 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
Liad Kaufmanad5de732016-09-27 16:01:10 +03001500 mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
Liad Kaufman24afba72015-07-28 18:56:08 +03001501 spin_unlock(&mvmsta->lock);
1502
Liad Kaufman24afba72015-07-28 18:56:08 +03001503 while ((skb = __skb_dequeue(&deferred_tx)))
1504 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1505 ieee80211_free_txskb(mvm->hw, skb);
1506 local_bh_enable();
1507
1508 /* Wake queue */
1509 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1510}
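
/*
 * Illustrative sketch (documentation only): the "splice under the
 * lock, transmit outside it" pattern used by
 * iwl_mvm_tx_deferred_stream() above.  The skb_queue_* helpers are
 * the real kernel API; the surrounding function is hypothetical.
 */
#if 0
static void drain_deferred_sketch(spinlock_t *lock,
				  struct sk_buff_head *pending)
{
	struct sk_buff_head local;
	struct sk_buff *skb;

	__skb_queue_head_init(&local);

	local_bh_disable();	/* the TX path runs in BH context */
	spin_lock(lock);
	/* steal all pending frames in one operation */
	skb_queue_splice_init(pending, &local);
	spin_unlock(lock);

	while ((skb = __skb_dequeue(&local)))
		; /* transmit or free each skb here */
	local_bh_enable();
}
#endif
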
1511
1512void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1513{
1514 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1515 add_stream_wk);
1516 struct ieee80211_sta *sta;
1517 struct iwl_mvm_sta *mvmsta;
1518 unsigned long deferred_tid_traffic;
Johannes Bergb3422282018-07-04 16:11:14 +02001519 int sta_id, tid;
Liad Kaufman24afba72015-07-28 18:56:08 +03001520
1521 mutex_lock(&mvm->mutex);
1522
Johannes Berg724fe772018-07-04 23:02:14 +02001523 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001524
Liad Kaufman24afba72015-07-28 18:56:08 +03001525 /* Go over all stations with deferred traffic */
1526 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1527 IWL_MVM_STATION_COUNT) {
1528 clear_bit(sta_id, mvm->sta_deferred_frames);
1529 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1530 lockdep_is_held(&mvm->mutex));
1531 if (IS_ERR_OR_NULL(sta))
1532 continue;
1533
1534 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1535 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1536
1537 for_each_set_bit(tid, &deferred_tid_traffic,
1538 IWL_MAX_TID_COUNT + 1)
1539 iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1540 }
1541
1542 mutex_unlock(&mvm->mutex);
1543}
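
/*
 * Illustrative sketch (documentation only): the two-level bitmap walk
 * done by iwl_mvm_add_new_dqa_stream_wk() above - stations first,
 * then TIDs within each station.  The helper name is hypothetical.
 */
#if 0
static void deferred_bitmap_walk_sketch(unsigned long *sta_map,
					unsigned long tid_map)
{
	int sta_id, tid;

	for_each_set_bit(sta_id, sta_map, IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, sta_map);	/* consume the pending bit */
		for_each_set_bit(tid, &tid_map, IWL_MAX_TID_COUNT + 1)
			; /* handle (sta_id, tid) here */
	}
}
#endif
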
1544
1545static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001546 struct ieee80211_sta *sta,
1547 enum nl80211_iftype vif_type)
Liad Kaufman24afba72015-07-28 18:56:08 +03001548{
1549 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1550 int queue;
1551
Sara Sharon396952e2017-02-22 19:40:55 +02001552	/* queue reservation is disabled on the new TX path */
1553 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1554 return 0;
1555
Johannes Berg724fe772018-07-04 23:02:14 +02001556 /* run the general cleanup/unsharing of queues */
1557 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
Liad Kaufman9794c642015-08-19 17:34:28 +03001558
Liad Kaufman24afba72015-07-28 18:56:08 +03001559 spin_lock_bh(&mvm->queue_info_lock);
1560
1561 /* Make sure we have free resources for this STA */
Liad Kaufmand5216a22015-08-09 15:50:51 +03001562 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
Johannes Berg1c140892018-07-04 11:58:28 +02001563 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
Liad Kaufmancf961e12015-08-13 19:16:08 +03001564 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1565 IWL_MVM_QUEUE_FREE))
Liad Kaufmand5216a22015-08-09 15:50:51 +03001566 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1567 else
Liad Kaufman9794c642015-08-19 17:34:28 +03001568 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1569 IWL_MVM_DQA_MIN_DATA_QUEUE,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001570 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufman24afba72015-07-28 18:56:08 +03001571 if (queue < 0) {
1572 spin_unlock_bh(&mvm->queue_info_lock);
Johannes Berg724fe772018-07-04 23:02:14 +02001573 /* try again - this time kick out a queue if needed */
1574 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1575 if (queue < 0) {
1576 IWL_ERR(mvm, "No available queues for new station\n");
1577 return -ENOSPC;
1578 }
1579 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufman24afba72015-07-28 18:56:08 +03001580 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03001581 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman24afba72015-07-28 18:56:08 +03001582
1583 spin_unlock_bh(&mvm->queue_info_lock);
1584
1585 mvmsta->reserved_queue = queue;
1586
1587 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1588 queue, mvmsta->sta_id);
1589
1590 return 0;
1591}
1592
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001593/*
1594 * In DQA mode, after a HW restart the queues should be allocated as before, in
1595 * order to avoid race conditions when there are shared queues. This function
1596 * does the re-mapping and queue allocation.
1597 *
1598 * Note that re-enabling aggregations isn't done in this function.
1599 */
1600static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1601 struct iwl_mvm_sta *mvm_sta)
1602{
1603 unsigned int wdg_timeout =
1604 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1605 int i;
1606 struct iwl_trans_txq_scd_cfg cfg = {
1607 .sta_id = mvm_sta->sta_id,
1608 .frame_limit = IWL_FRAME_LIMIT,
1609 };
1610
Johannes Berg03c902b2016-12-02 12:03:36 +01001611 /* Make sure reserved queue is still marked as such (if allocated) */
1612 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1613 mvm->queue_info[mvm_sta->reserved_queue].status =
1614 IWL_MVM_QUEUE_RESERVED;
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001615
1616 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1617 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1618 int txq_id = tid_data->txq_id;
1619 int ac;
1620 u8 mac_queue;
1621
Sara Sharon6862fce2017-02-22 19:34:17 +02001622 if (txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001623 continue;
1624
1625 skb_queue_head_init(&tid_data->deferred_tx_frames);
1626
1627 ac = tid_to_mac80211_ac[i];
1628 mac_queue = mvm_sta->vif->hw_queue[ac];
1629
Sara Sharon310181e2017-01-17 14:27:48 +02001630 if (iwl_mvm_has_new_tx_api(mvm)) {
1631 IWL_DEBUG_TX_QUEUES(mvm,
1632 "Re-mapping sta %d tid %d\n",
1633 mvm_sta->sta_id, i);
1634 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1635 mvm_sta->sta_id,
1636 i, wdg_timeout);
1637 tid_data->txq_id = txq_id;
Liad Kaufman5d390512017-10-17 16:26:00 +03001638
1639 /*
1640	 * Since we don't set the seq number after reset, and the HW
1641	 * sets it now, an FW reset will cause the seq num to start
1642	 * at 0 again, so the driver needs to update its internal copy
1643	 * as well to keep it in sync with the real value
1644 */
1645 tid_data->seq_number = 0;
Sara Sharon310181e2017-01-17 14:27:48 +02001646 } else {
1647 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001648
Sara Sharon310181e2017-01-17 14:27:48 +02001649 cfg.tid = i;
Emmanuel Grumbachcf6c6ea2017-06-13 13:18:48 +03001650 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
Sara Sharon310181e2017-01-17 14:27:48 +02001651 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1652 txq_id ==
1653 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001654
Sara Sharon310181e2017-01-17 14:27:48 +02001655 IWL_DEBUG_TX_QUEUES(mvm,
1656 "Re-mapping sta %d tid %d to queue %d\n",
1657 mvm_sta->sta_id, i, txq_id);
1658
1659 iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1660 wdg_timeout);
Sara Sharon34e10862017-02-23 13:15:07 +02001661 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
Sara Sharon310181e2017-01-17 14:27:48 +02001662 }
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001663 }
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001664}
1665
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001666static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1667 struct iwl_mvm_int_sta *sta,
1668 const u8 *addr,
1669 u16 mac_id, u16 color)
1670{
1671 struct iwl_mvm_add_sta_cmd cmd;
1672 int ret;
Luca Coelho3f497de2017-09-02 11:05:22 +03001673 u32 status = ADD_STA_SUCCESS;
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001674
1675 lockdep_assert_held(&mvm->mutex);
1676
1677 memset(&cmd, 0, sizeof(cmd));
1678 cmd.sta_id = sta->sta_id;
1679 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1680 color));
1681 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1682 cmd.station_type = sta->type;
1683
1684 if (!iwl_mvm_has_new_tx_api(mvm))
1685 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1686 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1687
1688 if (addr)
1689 memcpy(cmd.addr, addr, ETH_ALEN);
1690
1691 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1692 iwl_mvm_add_sta_cmd_size(mvm),
1693 &cmd, &status);
1694 if (ret)
1695 return ret;
1696
1697 switch (status & IWL_ADD_STA_STATUS_MASK) {
1698 case ADD_STA_SUCCESS:
1699 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1700 return 0;
1701 default:
1702 ret = -EIO;
1703 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1704 status);
1705 break;
1706 }
1707 return ret;
1708}
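
/*
 * Illustrative sketch (documentation only): the "send command, check
 * masked status" idiom shared by iwl_mvm_add_int_sta_common() above
 * and the other ADD_STA callers in this file.  The helper name is
 * hypothetical; the calls and masks are the ones used above.
 */
#if 0
static int add_sta_status_sketch(struct iwl_mvm *mvm,
				 struct iwl_mvm_add_sta_cmd *cmd)
{
	u32 status = ADD_STA_SUCCESS;
	int ret;

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  cmd, &status);
	if (ret)
		return ret;	/* transport-level failure */

	/* only the low bits of the reply carry the ADD_STA result */
	if ((status & IWL_ADD_STA_STATUS_MASK) != ADD_STA_SUCCESS)
		return -EIO;

	return 0;
}
#endif
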
1709
Johannes Berg8ca151b2013-01-24 14:25:36 +01001710int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1711 struct ieee80211_vif *vif,
1712 struct ieee80211_sta *sta)
1713{
1714 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001715 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharona571f5f2015-12-07 12:50:58 +02001716 struct iwl_mvm_rxq_dup_data *dup_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001717 int i, ret, sta_id;
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001718 bool sta_update = false;
1719 unsigned int sta_flags = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001720
1721 lockdep_assert_held(&mvm->mutex);
1722
1723 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
Eliad Pellerb92e6612014-01-23 17:58:23 +02001724 sta_id = iwl_mvm_find_free_sta_id(mvm,
1725 ieee80211_vif_type_p2p(vif));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001726 else
1727 sta_id = mvm_sta->sta_id;
1728
Sara Sharon0ae98812017-01-04 14:53:58 +02001729 if (sta_id == IWL_MVM_INVALID_STA)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001730 return -ENOSPC;
1731
1732 spin_lock_init(&mvm_sta->lock);
1733
Johannes Bergc8f54702017-06-19 23:50:31 +02001734 /* if this is a HW restart re-alloc existing queues */
1735 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001736 struct iwl_mvm_int_sta tmp_sta = {
1737 .sta_id = sta_id,
1738 .type = mvm_sta->sta_type,
1739 };
1740
1741 /*
1742 * First add an empty station since allocating
1743 * a queue requires a valid station
1744 */
1745 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1746 mvmvif->id, mvmvif->color);
1747 if (ret)
1748 goto err;
1749
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001750 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001751 sta_update = true;
1752 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001753 goto update_fw;
1754 }
1755
Johannes Berg8ca151b2013-01-24 14:25:36 +01001756 mvm_sta->sta_id = sta_id;
1757 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1758 mvmvif->color);
1759 mvm_sta->vif = vif;
Liad Kaufmana58bb462017-05-28 14:20:04 +03001760 if (!mvm->trans->cfg->gen2)
1761 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1762 else
1763 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001764 mvm_sta->tx_protection = 0;
1765 mvm_sta->tt_tx_protection = false;
Sara Sharonced19f22017-02-06 19:09:32 +02001766 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001767
1768 /* HW restart, don't assume the memory has been zeroed */
Liad Kaufman69191af2015-09-01 18:50:22 +03001769 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001770 mvm_sta->tfd_queue_msk = 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001771
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001772 /* for HW restart - reset everything but the sequence number */
Liad Kaufman24afba72015-07-28 18:56:08 +03001773 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001774 u16 seq = mvm_sta->tid_data[i].seq_number;
1775 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1776 mvm_sta->tid_data[i].seq_number = seq;
Liad Kaufman24afba72015-07-28 18:56:08 +03001777
Liad Kaufman24afba72015-07-28 18:56:08 +03001778 /*
1779 * Mark all queues for this STA as unallocated and defer TX
1780 * frames until the queue is allocated
1781 */
Sara Sharon6862fce2017-02-22 19:34:17 +02001782 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001783 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001784 }
Liad Kaufman24afba72015-07-28 18:56:08 +03001785 mvm_sta->deferred_traffic_tid_map = 0;
Eyal Shapiraefed6642014-09-14 15:58:53 +03001786 mvm_sta->agg_tids = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001787
Sara Sharona571f5f2015-12-07 12:50:58 +02001788 if (iwl_mvm_has_new_rx_api(mvm) &&
1789 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Johannes Berg92c4dca2017-06-07 10:35:54 +02001790 int q;
1791
Sara Sharona571f5f2015-12-07 12:50:58 +02001792 dup_data = kcalloc(mvm->trans->num_rx_queues,
Johannes Berg92c4dca2017-06-07 10:35:54 +02001793 sizeof(*dup_data), GFP_KERNEL);
Sara Sharona571f5f2015-12-07 12:50:58 +02001794 if (!dup_data)
1795 return -ENOMEM;
Johannes Berg92c4dca2017-06-07 10:35:54 +02001796 /*
1797 * Initialize all the last_seq values to 0xffff which can never
1798 * compare equal to the frame's seq_ctrl in the check in
1799 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1800 * number and fragmented packets don't reach that function.
1801 *
1802 * This thus allows receiving a packet with seqno 0 and the
1803 * retry bit set as the very first packet on a new TID.
1804 */
1805 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1806 memset(dup_data[q].last_seq, 0xff,
1807 sizeof(dup_data[q].last_seq));
Sara Sharona571f5f2015-12-07 12:50:58 +02001808 mvm_sta->dup_data = dup_data;
1809 }
1810
Johannes Bergc8f54702017-06-19 23:50:31 +02001811 if (!iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufmand5216a22015-08-09 15:50:51 +03001812 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1813 ieee80211_vif_type_p2p(vif));
Liad Kaufman24afba72015-07-28 18:56:08 +03001814 if (ret)
1815 goto err;
1816 }
1817
Gregory Greenman9f66a392017-11-05 18:49:48 +02001818 /*
1819	 * if rs is registered with mac80211, then "add station" will be handled
1820	 * via the corresponding ops; otherwise we need to notify rate scaling here
1821 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02001822 if (iwl_mvm_has_tlc_offload(mvm))
Gregory Greenman9f66a392017-11-05 18:49:48 +02001823 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1824
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001825update_fw:
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001826 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001827 if (ret)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001828 goto err;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001829
Johannes Berg9e848012014-08-04 14:33:42 +02001830 if (vif->type == NL80211_IFTYPE_STATION) {
1831 if (!sta->tdls) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001832 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001833 mvmvif->ap_sta_id = sta_id;
1834 } else {
Sara Sharon0ae98812017-01-04 14:53:58 +02001835 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001836 }
1837 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001838
1839 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1840
1841 return 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001842
1843err:
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001844 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001845}
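
/*
 * Illustrative sketch (documentation only): why iwl_mvm_add_sta()
 * above primes every last_seq entry with 0xffff.  The low 4 bits of
 * a frame's seq_ctrl are the fragment number, and fragmented frames
 * never reach the duplicate check, so a real frame can never compare
 * equal to 0xffff - even seqno 0 with the retry bit set is accepted
 * as new.  The numbers below are just an example.
 */
#if 0
static bool dup_check_sketch(u16 last_seq, u16 seq_ctrl)
{
	/* e.g. first frame: last_seq == 0xffff, seq_ctrl == 0x0010 */
	return seq_ctrl == last_seq;	/* false -> not a duplicate */
}
#endif
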
1846
1847int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1848 bool drain)
1849{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001850 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001851 int ret;
1852 u32 status;
1853
1854 lockdep_assert_held(&mvm->mutex);
1855
1856 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1857 cmd.sta_id = mvmsta->sta_id;
1858 cmd.add_modify = STA_MODE_MODIFY;
1859 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1860 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1861
1862 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001863 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1864 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001865 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001866 if (ret)
1867 return ret;
1868
Sara Sharon837c4da2016-01-07 16:50:45 +02001869 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001870 case ADD_STA_SUCCESS:
1871		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1872 mvmsta->sta_id);
1873 break;
1874 default:
1875 ret = -EIO;
1876 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1877 mvmsta->sta_id);
1878 break;
1879 }
1880
1881 return ret;
1882}
1883
1884/*
1885 * Remove a station from the FW table. Before sending the command to remove
1886 * the station, validate that the station is indeed known to the driver (sanity
1887 * only).
1888 */
1889static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1890{
1891 struct ieee80211_sta *sta;
1892 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1893 .sta_id = sta_id,
1894 };
1895 int ret;
1896
1897 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1898 lockdep_is_held(&mvm->mutex));
1899
1900 /* Note: internal stations are marked as error values */
1901 if (!sta) {
1902 IWL_ERR(mvm, "Invalid station id\n");
1903 return -EINVAL;
1904 }
1905
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001906 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001907 sizeof(rm_sta_cmd), &rm_sta_cmd);
1908 if (ret) {
1909 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1910 return ret;
1911 }
1912
1913 return 0;
1914}
1915
Liad Kaufman24afba72015-07-28 18:56:08 +03001916static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1917 struct ieee80211_vif *vif,
1918 struct iwl_mvm_sta *mvm_sta)
1919{
1920 int ac;
1921 int i;
1922
1923 lockdep_assert_held(&mvm->mutex);
1924
1925 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
Sara Sharon6862fce2017-02-22 19:34:17 +02001926 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman24afba72015-07-28 18:56:08 +03001927 continue;
1928
1929 ac = iwl_mvm_tid_to_ac_queue(i);
1930 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1931 vif->hw_queue[ac], i, 0);
Sara Sharon6862fce2017-02-22 19:34:17 +02001932 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001933 }
1934}
1935
Sara Sharond6d517b2017-03-06 10:16:11 +02001936int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1937 struct iwl_mvm_sta *mvm_sta)
1938{
Sharon Dvirbec95222017-06-12 11:40:33 +03001939 int i;
Sara Sharond6d517b2017-03-06 10:16:11 +02001940
1941 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1942 u16 txq_id;
Sharon Dvirbec95222017-06-12 11:40:33 +03001943 int ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001944
1945 spin_lock_bh(&mvm_sta->lock);
1946 txq_id = mvm_sta->tid_data[i].txq_id;
1947 spin_unlock_bh(&mvm_sta->lock);
1948
1949 if (txq_id == IWL_MVM_INVALID_QUEUE)
1950 continue;
1951
1952 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1953 if (ret)
Sharon Dvirbec95222017-06-12 11:40:33 +03001954 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001955 }
1956
Sharon Dvirbec95222017-06-12 11:40:33 +03001957 return 0;
Sara Sharond6d517b2017-03-06 10:16:11 +02001958}
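
/*
 * Illustrative sketch (documentation only): the pattern used by
 * iwl_mvm_wait_sta_queues_empty() above - sample a field under the
 * spinlock, then do the (possibly sleeping) wait outside it, since
 * iwl_trans_wait_txq_empty() may sleep.  sleeping_wait() is a
 * hypothetical stand-in.
 */
#if 0
static int wait_outside_lock_sketch(spinlock_t *lock, u16 *txq_id_field)
{
	u16 txq_id;

	spin_lock_bh(lock);
	txq_id = *txq_id_field;		/* snapshot under the lock */
	spin_unlock_bh(lock);

	return sleeping_wait(txq_id);	/* no spinlock held here */
}
#endif
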
1959
Johannes Berg8ca151b2013-01-24 14:25:36 +01001960int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1961 struct ieee80211_vif *vif,
1962 struct ieee80211_sta *sta)
1963{
1964 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001965 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001966 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001967 int ret;
1968
1969 lockdep_assert_held(&mvm->mutex);
1970
Sara Sharona571f5f2015-12-07 12:50:58 +02001971 if (iwl_mvm_has_new_rx_api(mvm))
1972 kfree(mvm_sta->dup_data);
1973
Johannes Bergc8f54702017-06-19 23:50:31 +02001974 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1975 if (ret)
1976 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001977
Johannes Bergc8f54702017-06-19 23:50:31 +02001978 /* flush its queues here since we are freeing mvm_sta */
1979 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1980 if (ret)
1981 return ret;
1982 if (iwl_mvm_has_new_tx_api(mvm)) {
1983 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1984 } else {
1985 u32 q_mask = mvm_sta->tfd_queue_msk;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001986
Johannes Bergc8f54702017-06-19 23:50:31 +02001987 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1988 q_mask);
1989 }
1990 if (ret)
1991 return ret;
Liad Kaufman56214742016-09-22 15:14:08 +03001992
Johannes Bergc8f54702017-06-19 23:50:31 +02001993 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001994
Johannes Bergc8f54702017-06-19 23:50:31 +02001995 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001996
Johannes Bergc8f54702017-06-19 23:50:31 +02001997 /* If there is a TXQ still marked as reserved - free it */
1998 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1999 u8 reserved_txq = mvm_sta->reserved_queue;
2000 enum iwl_mvm_queue_status *status;
2001
2002 /*
2003 * If no traffic has gone through the reserved TXQ - it
2004 * is still marked as IWL_MVM_QUEUE_RESERVED, and
2005 * should be manually marked as free again
2006 */
2007 spin_lock_bh(&mvm->queue_info_lock);
2008 status = &mvm->queue_info[reserved_txq].status;
2009 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
2010 (*status != IWL_MVM_QUEUE_FREE),
2011 "sta_id %d reserved txq %d status %d",
2012 sta_id, reserved_txq, *status)) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002013 spin_unlock_bh(&mvm->queue_info_lock);
Johannes Bergc8f54702017-06-19 23:50:31 +02002014 return -EINVAL;
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002015 }
2016
Johannes Bergc8f54702017-06-19 23:50:31 +02002017 *status = IWL_MVM_QUEUE_FREE;
2018 spin_unlock_bh(&mvm->queue_info_lock);
2019 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002020
Johannes Bergc8f54702017-06-19 23:50:31 +02002021 if (vif->type == NL80211_IFTYPE_STATION &&
2022 mvmvif->ap_sta_id == sta_id) {
2023 /* if associated - we can't remove the AP STA now */
2024 if (vif->bss_conf.assoc)
2025 return ret;
Eliad Peller37577fe2013-12-05 17:19:39 +02002026
Johannes Bergc8f54702017-06-19 23:50:31 +02002027 /* unassoc - go ahead - remove the AP STA now */
2028 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
2029
2030 /* clear d0i3_ap_sta_id if no longer relevant */
2031 if (mvm->d0i3_ap_sta_id == sta_id)
2032 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002033 }
2034
2035 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03002036 * This shouldn't happen - the TDLS channel switch should be canceled
2037 * before the STA is removed.
2038 */
Sara Sharon94c3e612016-12-07 15:04:37 +02002039 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
Sara Sharon0ae98812017-01-04 14:53:58 +02002040 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03002041 cancel_delayed_work(&mvm->tdls_cs.dwork);
2042 }
2043
2044 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03002045 * Make sure that the tx response code sees the station as -EBUSY and
2046 * calls the drain worker.
2047 */
2048 spin_lock_bh(&mvm_sta->lock);
Johannes Bergc8f54702017-06-19 23:50:31 +02002049 spin_unlock_bh(&mvm_sta->lock);
Sara Sharon94c3e612016-12-07 15:04:37 +02002050
Johannes Bergc8f54702017-06-19 23:50:31 +02002051 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
2052 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002053
2054 return ret;
2055}
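
/*
 * Illustrative sketch (documentation only): the teardown order that
 * iwl_mvm_rm_sta() above enforces.  The helpers are hypothetical
 * stand-ins for the real calls.
 */
#if 0
static int rm_sta_order_sketch(void)
{
	int ret;

	ret = drain_on();	/* 1. have the FW drain the STA's frames */
	if (ret)
		return ret;
	flush_tx();		/* 2. flush any pending TX */
	wait_empty();		/* 3. wait until the TXQs are empty */
	drain_off();		/* 4. clear the drain state */
	disable_queues();	/* 5. free the TXQs and reserved queue */
	return remove_sta();	/* 6. finally remove the STA from FW */
}
#endif
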
2056
2057int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
2058 struct ieee80211_vif *vif,
2059 u8 sta_id)
2060{
2061 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
2062
2063 lockdep_assert_held(&mvm->mutex);
2064
Monam Agarwalc531c772014-03-24 00:05:56 +05302065 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002066 return ret;
2067}
2068
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002069int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2070 struct iwl_mvm_int_sta *sta,
Sara Sharonced19f22017-02-06 19:09:32 +02002071 u32 qmask, enum nl80211_iftype iftype,
2072 enum iwl_sta_type type)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002073{
Avraham Sterndf65c8d2018-03-06 14:10:49 +02002074 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2075 sta->sta_id == IWL_MVM_INVALID_STA) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02002076 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Sara Sharon0ae98812017-01-04 14:53:58 +02002077 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002078 return -ENOSPC;
2079 }
2080
2081 sta->tfd_queue_msk = qmask;
Sara Sharonced19f22017-02-06 19:09:32 +02002082 sta->type = type;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002083
2084 /* put a non-NULL value so iterating over the stations won't stop */
2085 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2086 return 0;
2087}
2088
Sara Sharon26d6c162017-01-03 12:00:19 +02002089void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002090{
Monam Agarwalc531c772014-03-24 00:05:56 +05302091 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002092 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
Sara Sharon0ae98812017-01-04 14:53:58 +02002093 sta->sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002094}
2095
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002096static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
2097 u8 sta_id, u8 fifo)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002098{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02002099 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
2100 mvm->cfg->base_params->wd_timeout :
2101 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002102
Sara Sharon310181e2017-01-17 14:27:48 +02002103 if (iwl_mvm_has_new_tx_api(mvm)) {
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002104 int tvqm_queue =
2105 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
2106 IWL_MAX_TID_COUNT,
2107 wdg_timeout);
2108 *queue = tvqm_queue;
Johannes Bergc8f54702017-06-19 23:50:31 +02002109 } else {
Liad Kaufman28d07932015-09-01 16:36:25 +03002110 struct iwl_trans_txq_scd_cfg cfg = {
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002111 .fifo = fifo,
2112 .sta_id = sta_id,
Liad Kaufman28d07932015-09-01 16:36:25 +03002113 .tid = IWL_MAX_TID_COUNT,
2114 .aggregate = false,
2115 .frame_limit = IWL_FRAME_LIMIT,
2116 };
2117
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002118 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
Liad Kaufman28d07932015-09-01 16:36:25 +03002119 }
Sara Sharonc5a719e2016-11-15 10:20:48 +02002120}
2121
2122int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2123{
2124 int ret;
2125
2126 lockdep_assert_held(&mvm->mutex);
2127
2128 /* Allocate aux station and assign to it the aux queue */
2129 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Sara Sharonced19f22017-02-06 19:09:32 +02002130 NL80211_IFTYPE_UNSPECIFIED,
2131 IWL_STA_AUX_ACTIVITY);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002132 if (ret)
2133 return ret;
2134
2135 /* Map Aux queue to fifo - needs to happen before adding Aux station */
2136 if (!iwl_mvm_has_new_tx_api(mvm))
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002137 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2138 mvm->aux_sta.sta_id,
2139 IWL_MVM_TX_FIFO_MCAST);
Liad Kaufman28d07932015-09-01 16:36:25 +03002140
Johannes Berg8ca151b2013-01-24 14:25:36 +01002141 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
2142 MAC_INDEX_AUX, 0);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002143 if (ret) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002144 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002145 return ret;
2146 }
2147
2148 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002149	 * For 22000 firmware and on we cannot add a queue to a station unknown
Sara Sharonc5a719e2016-11-15 10:20:48 +02002150	 * to the firmware, so enable the queue here - after the station was added
2151 */
2152 if (iwl_mvm_has_new_tx_api(mvm))
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002153 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2154 mvm->aux_sta.sta_id,
2155 IWL_MVM_TX_FIFO_MCAST);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002156
2157 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002158}
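
/*
 * Illustrative sketch (documentation only): the queue-vs-ADD_STA
 * ordering that iwl_mvm_add_aux_sta() above (and iwl_mvm_add_snif_sta()
 * below) must respect.  enable_queue()/add_station() are hypothetical
 * stand-ins for the real calls.
 */
#if 0
static int int_sta_bringup_sketch(struct iwl_mvm *mvm)
{
	int ret;

	if (!iwl_mvm_has_new_tx_api(mvm))
		enable_queue(mvm);	/* old API: map the queue first */

	ret = add_station(mvm);		/* ADD_STA host command */
	if (ret)
		return ret;

	if (iwl_mvm_has_new_tx_api(mvm))
		enable_queue(mvm);	/* 22000+: STA must exist first */

	return 0;
}
#endif
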
2159
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002160int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2161{
2162 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002163 int ret;
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002164
2165 lockdep_assert_held(&mvm->mutex);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002166
2167 /* Map snif queue to fifo - must happen before adding snif station */
2168 if (!iwl_mvm_has_new_tx_api(mvm))
2169 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2170 mvm->snif_sta.sta_id,
2171 IWL_MVM_TX_FIFO_BE);
2172
2173 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002174 mvmvif->id, 0);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002175 if (ret)
2176 return ret;
2177
2178 /*
2179	 * For 22000 firmware and on we cannot add a queue to a station unknown
2180	 * to the firmware, so enable the queue here - after the station was added
2181 */
2182 if (iwl_mvm_has_new_tx_api(mvm))
2183 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2184 mvm->snif_sta.sta_id,
2185 IWL_MVM_TX_FIFO_BE);
2186
2187 return 0;
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002188}
2189
2190int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2191{
2192 int ret;
2193
2194 lockdep_assert_held(&mvm->mutex);
2195
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002196 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
2197 IWL_MAX_TID_COUNT, 0);
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002198 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2199 if (ret)
2200 IWL_WARN(mvm, "Failed sending remove station\n");
2201
2202 return ret;
2203}
2204
2205void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2206{
2207 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2208}
2209
Johannes Berg712b24a2014-08-04 14:14:14 +02002210void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
2211{
2212 lockdep_assert_held(&mvm->mutex);
2213
2214 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2215}
2216
Johannes Berg8ca151b2013-01-24 14:25:36 +01002217/*
2218 * Send the add station command for the vif's broadcast station.
2219 * Assumes that the station was already allocated.
2220 *
2221 * @mvm: the mvm component
2222 * @vif: the interface to which the broadcast station is added
2224 */
Johannes Berg013290a2014-08-04 13:38:48 +02002225int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002226{
2227 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002228 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02002229 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01002230 const u8 *baddr = _baddr;
Johannes Berg7daa7622017-02-24 12:02:22 +01002231 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002232 int ret;
Sara Sharonc5a719e2016-11-15 10:20:48 +02002233 unsigned int wdg_timeout =
2234 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2235 struct iwl_trans_txq_scd_cfg cfg = {
2236 .fifo = IWL_MVM_TX_FIFO_VO,
2237 .sta_id = mvmvif->bcast_sta.sta_id,
2238 .tid = IWL_MAX_TID_COUNT,
2239 .aggregate = false,
2240 .frame_limit = IWL_FRAME_LIMIT,
2241 };
Johannes Berg8ca151b2013-01-24 14:25:36 +01002242
2243 lockdep_assert_held(&mvm->mutex);
2244
Johannes Bergc8f54702017-06-19 23:50:31 +02002245 if (!iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufman4d339982017-03-21 17:13:16 +02002246 if (vif->type == NL80211_IFTYPE_AP ||
2247 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon49f71712017-01-09 12:07:16 +02002248 queue = mvm->probe_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002249 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
Sara Sharon49f71712017-01-09 12:07:16 +02002250 queue = mvm->p2p_dev_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002251 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
Liad Kaufmande24f632015-08-04 15:19:18 +03002252 return -EINVAL;
2253
Liad Kaufmandf88c082016-11-24 15:31:00 +02002254 bsta->tfd_queue_msk |= BIT(queue);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002255
Sara Sharon310181e2017-01-17 14:27:48 +02002256 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
2257 &cfg, wdg_timeout);
Liad Kaufmande24f632015-08-04 15:19:18 +03002258 }
2259
Johannes Berg5023d962013-07-31 14:07:43 +02002260 if (vif->type == NL80211_IFTYPE_ADHOC)
2261 baddr = vif->bss_conf.bssid;
2262
Sara Sharon0ae98812017-01-04 14:53:58 +02002263 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002264 return -ENOSPC;
2265
Liad Kaufmandf88c082016-11-24 15:31:00 +02002266 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2267 mvmvif->id, mvmvif->color);
2268 if (ret)
2269 return ret;
2270
2271 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002272	 * For 22000 firmware and on we cannot add a queue to a station unknown
Sara Sharonc5a719e2016-11-15 10:20:48 +02002273	 * to the firmware, so enable the queue here - after the station was added
Liad Kaufmandf88c082016-11-24 15:31:00 +02002274 */
Sara Sharon310181e2017-01-17 14:27:48 +02002275 if (iwl_mvm_has_new_tx_api(mvm)) {
Johannes Berg7daa7622017-02-24 12:02:22 +01002276 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
2277 bsta->sta_id,
2278 IWL_MAX_TID_COUNT,
2279 wdg_timeout);
2280
Luca Coelho7b758a12017-06-20 13:40:03 +03002281 if (vif->type == NL80211_IFTYPE_AP ||
2282 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon310181e2017-01-17 14:27:48 +02002283 mvm->probe_queue = queue;
2284 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2285 mvm->p2p_dev_queue = queue;
Sara Sharon310181e2017-01-17 14:27:48 +02002286 }
Liad Kaufmandf88c082016-11-24 15:31:00 +02002287
2288 return 0;
2289}
2290
2291static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2292 struct ieee80211_vif *vif)
2293{
2294 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002295 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002296
2297 lockdep_assert_held(&mvm->mutex);
2298
Sara Sharond49394a2017-03-05 13:01:08 +02002299 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2300
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002301 switch (vif->type) {
2302 case NL80211_IFTYPE_AP:
2303 case NL80211_IFTYPE_ADHOC:
2304 queue = mvm->probe_queue;
2305 break;
2306 case NL80211_IFTYPE_P2P_DEVICE:
2307 queue = mvm->p2p_dev_queue;
2308 break;
2309 default:
2310 WARN(1, "Can't free bcast queue on vif type %d\n",
2311 vif->type);
2312 return;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002313 }
2314
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002315 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
2316 if (iwl_mvm_has_new_tx_api(mvm))
2317 return;
2318
2319 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2320 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002321}
2322
2323/* Send the FW a request to remove the station from its internal data
2324 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02002325int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002326{
Johannes Berg013290a2014-08-04 13:38:48 +02002327 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002328 int ret;
2329
2330 lockdep_assert_held(&mvm->mutex);
2331
Johannes Bergc8f54702017-06-19 23:50:31 +02002332 iwl_mvm_free_bcast_sta_queues(mvm, vif);
Liad Kaufmandf88c082016-11-24 15:31:00 +02002333
Johannes Berg013290a2014-08-04 13:38:48 +02002334 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002335 if (ret)
2336 IWL_WARN(mvm, "Failed sending remove station\n");
2337 return ret;
2338}
2339
Johannes Berg013290a2014-08-04 13:38:48 +02002340int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2341{
2342 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002343
2344 lockdep_assert_held(&mvm->mutex);
2345
Johannes Bergc8f54702017-06-19 23:50:31 +02002346 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
Sara Sharonced19f22017-02-06 19:09:32 +02002347 ieee80211_vif_type_p2p(vif),
2348 IWL_STA_GENERAL_PURPOSE);
Johannes Berg013290a2014-08-04 13:38:48 +02002349}
2350
Johannes Berg8ca151b2013-01-24 14:25:36 +01002351/* Allocate a new station entry for the broadcast station to the given vif,
2352 * and send it to the FW.
2353 * Note that each P2P mac should have its own broadcast station.
2354 *
2355 * @mvm: the mvm component
2356 * @vif: the interface to which the broadcast station is added
2357 */
Luca Coelhod1973582017-06-22 16:00:25 +03002358int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002359{
2360 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002361 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002362 int ret;
2363
2364 lockdep_assert_held(&mvm->mutex);
2365
Johannes Berg013290a2014-08-04 13:38:48 +02002366 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002367 if (ret)
2368 return ret;
2369
Johannes Berg013290a2014-08-04 13:38:48 +02002370 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002371
2372 if (ret)
2373 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02002374
Johannes Berg8ca151b2013-01-24 14:25:36 +01002375 return ret;
2376}
2377
Johannes Berg013290a2014-08-04 13:38:48 +02002378void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2379{
2380 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2381
2382 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2383}
2384
Johannes Berg8ca151b2013-01-24 14:25:36 +01002385/*
2386 * Send the FW a request to remove the station from its internal data
2387 * structures, and in addition remove it from the local data structure.
2388 */
Luca Coelhod1973582017-06-22 16:00:25 +03002389int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002390{
2391 int ret;
2392
2393 lockdep_assert_held(&mvm->mutex);
2394
Johannes Berg013290a2014-08-04 13:38:48 +02002395 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002396
Johannes Berg013290a2014-08-04 13:38:48 +02002397 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2398
Johannes Berg8ca151b2013-01-24 14:25:36 +01002399 return ret;
2400}
2401
Sara Sharon26d6c162017-01-03 12:00:19 +02002402/*
2403 * Allocate a new station entry for the multicast station to the given vif,
2404 * and send it to the FW.
2405 * Note that each AP/GO mac should have its own multicast station.
2406 *
2407 * @mvm: the mvm component
2408 * @vif: the interface to which the multicast station is added
2409 */
2410int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2411{
2412 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2413 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2414 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2415 const u8 *maddr = _maddr;
2416 struct iwl_trans_txq_scd_cfg cfg = {
2417 .fifo = IWL_MVM_TX_FIFO_MCAST,
2418 .sta_id = msta->sta_id,
Ilan Peer6508de02018-01-25 15:22:41 +02002419 .tid = 0,
Sara Sharon26d6c162017-01-03 12:00:19 +02002420 .aggregate = false,
2421 .frame_limit = IWL_FRAME_LIMIT,
2422 };
2423 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2424 int ret;
2425
2426 lockdep_assert_held(&mvm->mutex);
2427
Liad Kaufmanee48b722017-03-21 17:13:16 +02002428 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2429 vif->type != NL80211_IFTYPE_ADHOC))
Sara Sharon26d6c162017-01-03 12:00:19 +02002430 return -ENOTSUPP;
2431
Sara Sharonced19f22017-02-06 19:09:32 +02002432 /*
Sara Sharonfc07bd82017-12-21 15:05:28 +02002433 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2434 * invalid, so make sure we use the queue we want.
2435 * Note that this is done here as we want to avoid making DQA
2436	 * changes in the mac80211 layer.
2437 */
2438 if (vif->type == NL80211_IFTYPE_ADHOC) {
2439 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2440 mvmvif->cab_queue = vif->cab_queue;
2441 }
2442
2443 /*
Sara Sharonced19f22017-02-06 19:09:32 +02002444	 * While in previous FWs we had to exclude the cab queue from the TFD
2445	 * queue mask, now it is needed like any other queue.
2446 */
2447 if (!iwl_mvm_has_new_tx_api(mvm) &&
2448 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2449 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2450 &cfg, timeout);
2451 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2452 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002453 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2454 mvmvif->id, mvmvif->color);
2455 if (ret) {
2456 iwl_mvm_dealloc_int_sta(mvm, msta);
2457 return ret;
2458 }
2459
2460 /*
2461 * Enable cab queue after the ADD_STA command is sent.
Luca Coelho2f7a3862017-11-15 15:07:34 +02002462	 * This is needed for 22000 firmware, which won't accept an SCD_QUEUE_CFG
Sara Sharonced19f22017-02-06 19:09:32 +02002463	 * command with an unknown station id, and for FW that doesn't support
2464 * station API since the cab queue is not included in the
2465 * tfd_queue_mask.
Sara Sharon26d6c162017-01-03 12:00:19 +02002466 */
Sara Sharon310181e2017-01-17 14:27:48 +02002467 if (iwl_mvm_has_new_tx_api(mvm)) {
2468 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2469 msta->sta_id,
Ilan Peer6508de02018-01-25 15:22:41 +02002470 0,
Sara Sharon310181e2017-01-17 14:27:48 +02002471 timeout);
Sara Sharone2af3fa2017-02-22 19:35:10 +02002472 mvmvif->cab_queue = queue;
Sara Sharonced19f22017-02-06 19:09:32 +02002473 } else if (!fw_has_api(&mvm->fw->ucode_capa,
Sara Sharonfc07bd82017-12-21 15:05:28 +02002474 IWL_UCODE_TLV_API_STA_TYPE))
Sara Sharon310181e2017-01-17 14:27:48 +02002475 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2476 &cfg, timeout);
Sara Sharon26d6c162017-01-03 12:00:19 +02002477
Avraham Stern337bfc92018-06-04 15:10:18 +03002478 if (mvmvif->ap_wep_key) {
2479 u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
2480
2481 if (key_offset == STA_KEY_IDX_INVALID)
2482 return -ENOSPC;
2483
2484 ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
2485 mvmvif->ap_wep_key, 1, 0, NULL, 0,
2486 key_offset, 0);
2487 if (ret)
2488 return ret;
2489 }
2490
Sara Sharon26d6c162017-01-03 12:00:19 +02002491 return 0;
2492}
2493
2494/*
2495 * Send the FW a request to remove the station from its internal data
2496 * structures, and in addition remove it from the local data structure.
2497 */
2498int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2499{
2500 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2501 int ret;
2502
2503 lockdep_assert_held(&mvm->mutex);
2504
Sara Sharond49394a2017-03-05 13:01:08 +02002505 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2506
Sara Sharone2af3fa2017-02-22 19:35:10 +02002507 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
Ilan Peer6508de02018-01-25 15:22:41 +02002508 0, 0);
Sara Sharon26d6c162017-01-03 12:00:19 +02002509
2510 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2511 if (ret)
2512 IWL_WARN(mvm, "Failed sending remove station\n");
2513
2514 return ret;
2515}
2516
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002517#define IWL_MAX_RX_BA_SESSIONS 16
2518
Sara Sharonb915c102016-03-23 16:32:02 +02002519static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02002520{
Sara Sharonb915c102016-03-23 16:32:02 +02002521 struct iwl_mvm_delba_notif notif = {
2522 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2523 .metadata.sync = 1,
2524 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02002525 };
Sara Sharonb915c102016-03-23 16:32:02 +02002526 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2527};
Sara Sharon10b2b202016-03-20 16:23:41 +02002528
Sara Sharonb915c102016-03-23 16:32:02 +02002529static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2530 struct iwl_mvm_baid_data *data)
2531{
2532 int i;
2533
2534 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2535
2536 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2537 int j;
2538 struct iwl_mvm_reorder_buffer *reorder_buf =
2539 &data->reorder_buf[i];
Johannes Bergdfdddd92017-09-26 12:24:51 +02002540 struct iwl_mvm_reorder_buf_entry *entries =
2541 &data->entries[i * data->entries_per_queue];
Sara Sharonb915c102016-03-23 16:32:02 +02002542
Sara Sharon06904052016-02-28 20:28:17 +02002543 spin_lock_bh(&reorder_buf->lock);
2544 if (likely(!reorder_buf->num_stored)) {
2545 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02002546 continue;
Sara Sharon06904052016-02-28 20:28:17 +02002547 }
Sara Sharonb915c102016-03-23 16:32:02 +02002548
2549 /*
2550 * This shouldn't happen in regular DELBA since the internal
2551 * delBA notification should trigger a release of all frames in
2552 * the reorder buffer.
2553 */
2554 WARN_ON(1);
2555
2556 for (j = 0; j < reorder_buf->buf_size; j++)
Johannes Bergdfdddd92017-09-26 12:24:51 +02002557 __skb_queue_purge(&entries[j].e.frames);
Sara Sharon06904052016-02-28 20:28:17 +02002558 /*
2559		 * Prevent timer re-arm. This prevents a very far-fetched case
2560 * where we timed out on the notification. There may be prior
2561 * RX frames pending in the RX queue before the notification
2562 * that might get processed between now and the actual deletion
2563 * and we would re-arm the timer although we are deleting the
2564 * reorder buffer.
2565 */
2566 reorder_buf->removed = true;
2567 spin_unlock_bh(&reorder_buf->lock);
2568 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02002569 }
2570}
2571
2572static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
Sara Sharonb915c102016-03-23 16:32:02 +02002573 struct iwl_mvm_baid_data *data,
Luca Coelho514c30692018-06-24 11:59:54 +03002574 u16 ssn, u16 buf_size)
Sara Sharonb915c102016-03-23 16:32:02 +02002575{
2576 int i;
2577
2578 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2579 struct iwl_mvm_reorder_buffer *reorder_buf =
2580 &data->reorder_buf[i];
Johannes Bergdfdddd92017-09-26 12:24:51 +02002581 struct iwl_mvm_reorder_buf_entry *entries =
2582 &data->entries[i * data->entries_per_queue];
Sara Sharonb915c102016-03-23 16:32:02 +02002583 int j;
2584
2585 reorder_buf->num_stored = 0;
2586 reorder_buf->head_sn = ssn;
2587 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02002588 /* rx reorder timer */
Kees Cook8cef5342017-10-24 02:29:37 -07002589 timer_setup(&reorder_buf->reorder_timer,
2590 iwl_mvm_reorder_timer_expired, 0);
Sara Sharon06904052016-02-28 20:28:17 +02002591 spin_lock_init(&reorder_buf->lock);
2592 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02002593 reorder_buf->queue = i;
Sara Sharon5d43eab2017-02-02 12:51:39 +02002594 reorder_buf->valid = false;
Sara Sharonb915c102016-03-23 16:32:02 +02002595 for (j = 0; j < reorder_buf->buf_size; j++)
Johannes Bergdfdddd92017-09-26 12:24:51 +02002596 __skb_queue_head_init(&entries[j].e.frames);
Sara Sharonb915c102016-03-23 16:32:02 +02002597 }
Sara Sharon10b2b202016-03-20 16:23:41 +02002598}
2599
Johannes Berg8ca151b2013-01-24 14:25:36 +01002600int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Luca Coelho514c30692018-06-24 11:59:54 +03002601 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002602{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002603 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002604 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002605 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002606 int ret;
2607 u32 status;
2608
2609 lockdep_assert_held(&mvm->mutex);
2610
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002611 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2612 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2613 return -ENOSPC;
2614 }
2615
Sara Sharon10b2b202016-03-20 16:23:41 +02002616 if (iwl_mvm_has_new_rx_api(mvm) && start) {
Johannes Bergdfdddd92017-09-26 12:24:51 +02002617 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2618
2619 /* sparse doesn't like the __align() so don't check */
2620#ifndef __CHECKER__
2621 /*
2622 * The division below will be OK if either the cache line size
2623		 * can be divided by the entry size (ALIGN will round up) or if
2624		 * the entry size can be divided by the cache line size, in
2625 * which case the ALIGN() will do nothing.
2626 */
2627 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2628 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2629#endif
2630
2631 /*
2632 * Upward align the reorder buffer size to fill an entire cache
2633 * line for each queue, to avoid sharing cache lines between
2634 * different queues.
2635 */
2636 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2637
Sara Sharon10b2b202016-03-20 16:23:41 +02002638 /*
2639 * Allocate here so if allocation fails we can bail out early
2640 * before starting the BA session in the firmware
2641 */
Sara Sharonb915c102016-03-23 16:32:02 +02002642 baid_data = kzalloc(sizeof(*baid_data) +
2643 mvm->trans->num_rx_queues *
Johannes Bergdfdddd92017-09-26 12:24:51 +02002644 reorder_buf_size,
Sara Sharonb915c102016-03-23 16:32:02 +02002645 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002646 if (!baid_data)
2647 return -ENOMEM;
Johannes Bergdfdddd92017-09-26 12:24:51 +02002648
2649 /*
2650 * This division is why we need the above BUILD_BUG_ON(),
2651 * if that doesn't hold then this will not be right.
2652 */
2653 baid_data->entries_per_queue =
2654 reorder_buf_size / sizeof(baid_data->entries[0]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002655 }
2656
Johannes Berg8ca151b2013-01-24 14:25:36 +01002657 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2658 cmd.sta_id = mvm_sta->sta_id;
2659 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002660 if (start) {
2661 cmd.add_immediate_ba_tid = (u8) tid;
2662 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Luca Coelho514c30692018-06-24 11:59:54 +03002663 cmd.rx_ba_window = cpu_to_le16(buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002664 } else {
2665 cmd.remove_immediate_ba_tid = (u8) tid;
2666 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002667 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2668 STA_MODIFY_REMOVE_BA_TID;
2669
2670 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002671 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2672 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002673 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002674 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002675 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002676
Sara Sharon837c4da2016-01-07 16:50:45 +02002677 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002678 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002679 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2680 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002681 break;
2682 case ADD_STA_IMMEDIATE_BA_FAILURE:
2683 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2684 ret = -ENOSPC;
2685 break;
2686 default:
2687 ret = -EIO;
2688 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2689 start ? "start" : "stopp", status);
2690 break;
2691 }
2692
Sara Sharon10b2b202016-03-20 16:23:41 +02002693 if (ret)
2694 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002695
Sara Sharon10b2b202016-03-20 16:23:41 +02002696 if (start) {
2697 u8 baid;
2698
2699 mvm->rx_ba_sessions++;
2700
2701 if (!iwl_mvm_has_new_rx_api(mvm))
2702 return 0;
2703
2704 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2705 ret = -EINVAL;
2706 goto out_free;
2707 }
2708 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2709 IWL_ADD_STA_BAID_SHIFT);
2710 baid_data->baid = baid;
2711 baid_data->timeout = timeout;
2712 baid_data->last_rx = jiffies;
Kees Cook8cef5342017-10-24 02:29:37 -07002713 baid_data->rcu_ptr = &mvm->baid_map[baid];
2714 timer_setup(&baid_data->session_timer,
2715 iwl_mvm_rx_agg_session_expired, 0);
Sara Sharon10b2b202016-03-20 16:23:41 +02002716 baid_data->mvm = mvm;
2717 baid_data->tid = tid;
2718 baid_data->sta_id = mvm_sta->sta_id;
2719
2720 mvm_sta->tid_to_baid[tid] = baid;
2721 if (timeout)
2722 mod_timer(&baid_data->session_timer,
2723 TU_TO_EXP_TIME(timeout * 2));
2724
Sara Sharon3f1c4c52017-10-02 12:07:59 +03002725 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002726 /*
2727	 * Protect the BA data with RCU to cover the case where our
2728	 * internal RX sync mechanism times out (not that it's
2729	 * supposed to happen) and we would free the session data while
2730	 * RX is still being processed in parallel.
2731 */
Sara Sharon35263a02016-06-21 12:12:10 +03002732 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2733 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002734 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2735 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002736 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002737 u8 baid = mvm_sta->tid_to_baid[tid];
2738
Sara Sharon60dec522016-06-21 14:14:08 +03002739 if (mvm->rx_ba_sessions > 0)
2740 /* check that restart flow didn't zero the counter */
2741 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002742 if (!iwl_mvm_has_new_rx_api(mvm))
2743 return 0;
2744
2745 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2746 return -EINVAL;
2747
2748 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2749 if (WARN_ON(!baid_data))
2750 return -EINVAL;
2751
2752 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002753 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002754 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002755 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2756 kfree_rcu(baid_data, rcu_head);
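		/*
		 * Teardown ordering recap: the reorder buffers are flushed
		 * and the session timer stopped before the baid_map entry is
		 * cleared, and the struct itself is only reclaimed via
		 * kfree_rcu() once concurrent RX lookups have drained.
		 */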
Sara Sharon35263a02016-06-21 12:12:10 +03002757 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002758 }
2759 return 0;
2760
2761out_free:
2762 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002763 return ret;
2764}
2765
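/*
 * Usage sketch for the function above (illustrative only, loosely based on
 * the mac80211 ampdu_action path; locking and error handling elided):
 *
 *	ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, ssn, true, buf_size, timeout);
 *	...
 *	ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, 0, 0);
 *
 * The same function both opens (start=true) and tears down (start=false)
 * an RX BA session for a given TID.
 */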
Liad Kaufman9794c642015-08-19 17:34:28 +03002766int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2767 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002768{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002769 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002770 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002771 int ret;
2772 u32 status;
2773
2774 lockdep_assert_held(&mvm->mutex);
2775
2776 if (start) {
2777 mvm_sta->tfd_queue_msk |= BIT(queue);
2778 mvm_sta->tid_disable_agg &= ~BIT(tid);
2779 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002780 /* In DQA-mode the queue isn't removed on agg termination */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002781 mvm_sta->tid_disable_agg |= BIT(tid);
2782 }
2783
2784 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2785 cmd.sta_id = mvm_sta->sta_id;
2786 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002787 if (!iwl_mvm_has_new_tx_api(mvm))
2788 cmd.modify_mask = STA_MODIFY_QUEUES;
2789 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002790 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2791 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2792
2793 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002794 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2795 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002796 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002797 if (ret)
2798 return ret;
2799
Sara Sharon837c4da2016-01-07 16:50:45 +02002800 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002801 case ADD_STA_SUCCESS:
2802 break;
2803 default:
2804 ret = -EIO;
2805 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2806 start ? "start" : "stopp", status);
2807 break;
2808 }
2809
2810 return ret;
2811}
2812
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002813const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002814 IEEE80211_AC_BE,
2815 IEEE80211_AC_BK,
2816 IEEE80211_AC_BK,
2817 IEEE80211_AC_BE,
2818 IEEE80211_AC_VI,
2819 IEEE80211_AC_VI,
2820 IEEE80211_AC_VO,
2821 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002822 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002823};
2824
Johannes Berg3e56ead2013-02-15 22:23:18 +01002825static const u8 tid_to_ucode_ac[] = {
2826 AC_BE,
2827 AC_BK,
2828 AC_BK,
2829 AC_BE,
2830 AC_VI,
2831 AC_VI,
2832 AC_VO,
2833 AC_VO,
2834};
2835
Johannes Berg8ca151b2013-01-24 14:25:36 +01002836int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2837 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2838{
Johannes Berg5b577a92013-11-14 18:20:04 +01002839 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002840 struct iwl_mvm_tid_data *tid_data;
Liad Kaufmandd321622017-04-05 16:25:11 +03002841 u16 normalized_ssn;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002842 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002843 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002844
2845 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2846 return -EINVAL;
2847
Naftali Goldsteinbd800e42017-08-28 11:51:05 +03002848 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2849 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2850 IWL_ERR(mvm,
2851 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
Johannes Berg8ca151b2013-01-24 14:25:36 +01002852 mvmsta->tid_data[tid].state);
2853 return -ENXIO;
2854 }
2855
2856 lockdep_assert_held(&mvm->mutex);
2857
Liad Kaufmanbd8f3fc2018-01-17 15:25:28 +02002858 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2859 iwl_mvm_has_new_tx_api(mvm)) {
2860 u8 ac = tid_to_mac80211_ac[tid];
2861
2862 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2863 if (ret)
2864 return ret;
2865 }
2866
Arik Nemtsovb2492502014-03-13 12:21:50 +02002867 spin_lock_bh(&mvmsta->lock);
2868
2869 /* possible race condition - we entered D0i3 while starting agg */
2870 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2871 spin_unlock_bh(&mvmsta->lock);
2872 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2873 return -EIO;
2874 }
2875
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002876 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002877
Liad Kaufmancf961e12015-08-13 19:16:08 +03002878 /*
2879 * Note the possible cases:
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002880 * 1. An enabled TXQ - TXQ needs to become agg'ed
2881 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2882 * it as reserved
Liad Kaufmancf961e12015-08-13 19:16:08 +03002883 */
2884 txq_id = mvmsta->tid_data[tid].txq_id;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002885 if (txq_id == IWL_MVM_INVALID_QUEUE) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002886 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
Johannes Bergc8f54702017-06-19 23:50:31 +02002887 IWL_MVM_DQA_MIN_DATA_QUEUE,
2888 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002889 if (txq_id < 0) {
2890 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002891 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2892 goto release_locks;
2893 }
2894
2895 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2896 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002897 } else if (unlikely(mvm->queue_info[txq_id].status ==
2898 IWL_MVM_QUEUE_SHARED)) {
2899 ret = -ENXIO;
2900 IWL_DEBUG_TX_QUEUES(mvm,
2901 "Can't start tid %d agg on shared queue!\n",
2902 tid);
2903 goto release_locks;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002904 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002905
2906 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002907
Liad Kaufmancf961e12015-08-13 19:16:08 +03002908 IWL_DEBUG_TX_QUEUES(mvm,
2909 "AGG for tid %d will be on queue #%d\n",
2910 tid, txq_id);
2911
Johannes Berg8ca151b2013-01-24 14:25:36 +01002912 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002913 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002914 tid_data->txq_id = txq_id;
2915 *ssn = tid_data->ssn;
2916
2917 IWL_DEBUG_TX_QUEUES(mvm,
2918 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2919 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2920 tid_data->next_reclaimed);
2921
Liad Kaufmandd321622017-04-05 16:25:11 +03002922 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002923 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
Liad Kaufmandd321622017-04-05 16:25:11 +03002924 * to align the wrap around of ssn so we compare relevant values.
2925 */
2926 normalized_ssn = tid_data->ssn;
2927 if (mvm->trans->cfg->gen2)
2928 normalized_ssn &= 0xff;
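	/*
	 * Worked example (assumed values): on gen2 HW with ssn = 0x1ff and
	 * next_reclaimed = 0xff, the masking above yields normalized_ssn =
	 * 0xff, the two compare equal below, and the aggregation starts
	 * immediately instead of waiting for the queue to drain.
	 */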
2929
2930 if (normalized_ssn == tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002931 tid_data->state = IWL_AGG_STARTING;
2932 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2933 } else {
2934 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2935 }
2936
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002937 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002938 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002939
2940release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002941 spin_unlock(&mvm->queue_info_lock);
2942out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002943 spin_unlock_bh(&mvmsta->lock);
2944
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002945 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002946}
2947
2948int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Luca Coelho514c30692018-06-24 11:59:54 +03002949 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002950 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002951{
Johannes Berg5b577a92013-11-14 18:20:04 +01002952 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002953 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002954 unsigned int wdg_timeout =
2955 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002956 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002957 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002958 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002959 u16 ssn;
2960
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002961 struct iwl_trans_txq_scd_cfg cfg = {
2962 .sta_id = mvmsta->sta_id,
2963 .tid = tid,
2964 .frame_limit = buf_size,
2965 .aggregate = true,
2966 };
2967
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02002968 /*
2969 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
2970 * manager, so this function should never be called in this case.
2971 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02002972 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02002973 return -EINVAL;
2974
Eyal Shapiraefed6642014-09-14 15:58:53 +03002975 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2976 != IWL_MAX_TID_COUNT);
2977
Johannes Berg8ca151b2013-01-24 14:25:36 +01002978 spin_lock_bh(&mvmsta->lock);
2979 ssn = tid_data->ssn;
2980 queue = tid_data->txq_id;
2981 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03002982 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002983 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002984 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002985 spin_unlock_bh(&mvmsta->lock);
2986
Sara Sharon34e10862017-02-23 13:15:07 +02002987 if (iwl_mvm_has_new_tx_api(mvm)) {
2988 /*
Sara Sharon0ec9257b2017-10-16 09:45:10 +03002989 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2990 * would have failed, so if we are here there is no need to
2991 * allocate a queue.
2992		 * However, if the aggregation size differs from the default
2993		 * size, the scheduler should be reconfigured.
2994		 * We cannot do this with the new TX API, so return unsupported
2995		 * for now, until it is offloaded to firmware.
2996		 * Note that if the SCD default value changes, this condition
2997		 * should be updated as well.
Sara Sharon34e10862017-02-23 13:15:07 +02002998 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03002999 if (buf_size < IWL_FRAME_LIMIT)
Sara Sharon34e10862017-02-23 13:15:07 +02003000 return -ENOTSUPP;
3001
3002 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3003 if (ret)
3004 return -EIO;
3005 goto out;
3006 }
3007
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02003008 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01003009
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02003010 spin_lock_bh(&mvm->queue_info_lock);
3011 queue_status = mvm->queue_info[queue].status;
3012 spin_unlock_bh(&mvm->queue_info_lock);
3013
Johannes Bergc8f54702017-06-19 23:50:31 +02003014 /* Maybe there is no need to even alloc a queue... */
3015	if (queue_status == IWL_MVM_QUEUE_READY)
3016 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03003017
Johannes Bergc8f54702017-06-19 23:50:31 +02003018 /*
3019	 * Only reconfigure the SCD for the queue if the window size has
3020	 * changed from the current one, i.e. become smaller.
3021 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003022 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03003023 /*
Johannes Bergc8f54702017-06-19 23:50:31 +02003024 * If reconfiguring an existing queue, it first must be
3025 * drained
Liad Kaufmancf961e12015-08-13 19:16:08 +03003026 */
Johannes Bergc8f54702017-06-19 23:50:31 +02003027 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3028 BIT(queue));
3029 if (ret) {
3030 IWL_ERR(mvm,
3031 "Error draining queue before reconfig\n");
3032 return ret;
3033 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03003034
Johannes Bergc8f54702017-06-19 23:50:31 +02003035 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3036 mvmsta->sta_id, tid,
3037 buf_size, ssn);
3038 if (ret) {
3039 IWL_ERR(mvm,
3040 "Error reconfiguring TXQ #%d\n", queue);
3041 return ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03003042 }
3043 }
3044
3045 if (alloc_queue)
3046 iwl_mvm_enable_txq(mvm, queue,
3047 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
3048 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03003049
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02003050 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3051 if (queue_status != IWL_MVM_QUEUE_SHARED) {
3052 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3053 if (ret)
3054 return -EIO;
3055 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003056
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003057 /* No need to mark as reserved */
3058 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03003059 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003060 spin_unlock_bh(&mvm->queue_info_lock);
3061
Sara Sharon34e10862017-02-23 13:15:07 +02003062out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003063 /*
3064 * Even though in theory the peer could have different
3065 * aggregation reorder buffer sizes for different sessions,
3066 * our ucode doesn't allow for that and has a global limit
3067 * for each station. Therefore, use the minimum of all the
3068 * aggregation sessions and our default value.
3069 */
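	/*
	 * Example (assumed sizes): if a previous session already clamped
	 * max_agg_bufsize to 64 and this peer now offers buf_size = 32,
	 * the min() below lowers the limit to 32 and the LQ command pushes
	 * that station-wide limit to the firmware.
	 */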
3070 mvmsta->max_agg_bufsize =
3071 min(mvmsta->max_agg_bufsize, buf_size);
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003072 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003073
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03003074 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3075 sta->addr, tid);
3076
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003077 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003078}
3079
Sara Sharon34e10862017-02-23 13:15:07 +02003080static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3081 struct iwl_mvm_sta *mvmsta,
Avraham Stern4b387902018-03-07 10:41:18 +02003082 struct iwl_mvm_tid_data *tid_data)
Sara Sharon34e10862017-02-23 13:15:07 +02003083{
Avraham Stern4b387902018-03-07 10:41:18 +02003084 u16 txq_id = tid_data->txq_id;
3085
Sara Sharon34e10862017-02-23 13:15:07 +02003086 if (iwl_mvm_has_new_tx_api(mvm))
3087 return;
3088
3089 spin_lock_bh(&mvm->queue_info_lock);
3090 /*
3091	 * The TXQ is marked as reserved only if no traffic has come
3092	 * through yet. This means no traffic has been sent on this TID
3093	 * (agg'd or not), so we no longer have use for the queue. It
3094	 * hasn't even been allocated through iwl_mvm_enable_txq, so we
3095	 * can just mark it back as free.
3096 */
Avraham Stern4b387902018-03-07 10:41:18 +02003097 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
Sara Sharon34e10862017-02-23 13:15:07 +02003098 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Avraham Stern4b387902018-03-07 10:41:18 +02003099 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3100 }
Sara Sharon34e10862017-02-23 13:15:07 +02003101
3102 spin_unlock_bh(&mvm->queue_info_lock);
3103}
3104
Johannes Berg8ca151b2013-01-24 14:25:36 +01003105int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3106 struct ieee80211_sta *sta, u16 tid)
3107{
Johannes Berg5b577a92013-11-14 18:20:04 +01003108 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003109 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3110 u16 txq_id;
3111 int err;
3112
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02003113 /*
3114 * If mac80211 is cleaning its state, then say that we finished since
3115 * our state has been cleared anyway.
3116 */
3117 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3118 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3119 return 0;
3120 }
3121
Johannes Berg8ca151b2013-01-24 14:25:36 +01003122 spin_lock_bh(&mvmsta->lock);
3123
3124 txq_id = tid_data->txq_id;
3125
3126 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3127 mvmsta->sta_id, tid, txq_id, tid_data->state);
3128
Eyal Shapiraefed6642014-09-14 15:58:53 +03003129 mvmsta->agg_tids &= ~BIT(tid);
3130
Avraham Stern4b387902018-03-07 10:41:18 +02003131 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003132
Johannes Berg8ca151b2013-01-24 14:25:36 +01003133 switch (tid_data->state) {
3134 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01003135 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003136
3137 IWL_DEBUG_TX_QUEUES(mvm,
3138 "ssn = %d, next_recl = %d\n",
3139 tid_data->ssn, tid_data->next_reclaimed);
3140
Johannes Berg8ca151b2013-01-24 14:25:36 +01003141 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02003142 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02003143 spin_unlock_bh(&mvmsta->lock);
3144
3145 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3146
3147 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
Johannes Bergf7f89e72014-08-05 15:24:44 +02003148 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003149 case IWL_AGG_STARTING:
3150 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3151 /*
3152 * The agg session has been stopped before it was set up. This
3153	 * can happen when the AddBA timer times out, for example.
3154 */
3155
3156 /* No barriers since we are under mutex */
3157 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003158
3159 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3160 tid_data->state = IWL_AGG_OFF;
3161 err = 0;
3162 break;
3163 default:
3164 IWL_ERR(mvm,
3165 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3166 mvmsta->sta_id, tid, tid_data->state);
3167 IWL_ERR(mvm,
3168 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3169 err = -EINVAL;
3170 }
3171
3172 spin_unlock_bh(&mvmsta->lock);
3173
3174 return err;
3175}
3176
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003177int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3178 struct ieee80211_sta *sta, u16 tid)
3179{
Johannes Berg5b577a92013-11-14 18:20:04 +01003180 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003181 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3182 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02003183 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003184
3185 /*
3186 * First set the agg state to OFF to avoid calling
3187 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3188 */
3189 spin_lock_bh(&mvmsta->lock);
3190 txq_id = tid_data->txq_id;
3191 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3192 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02003193 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003194 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03003195 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003196 spin_unlock_bh(&mvmsta->lock);
3197
Avraham Stern4b387902018-03-07 10:41:18 +02003198 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003199
Johannes Bergb6658ff2013-07-24 13:55:51 +02003200 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02003201 iwl_mvm_drain_sta(mvm, mvmsta, true);
Sara Sharond6d517b2017-03-06 10:16:11 +02003202
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003203 if (iwl_mvm_has_new_tx_api(mvm)) {
3204 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3205 BIT(tid), 0))
3206 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02003207 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003208 } else {
3209 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3210 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02003211 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003212 }
Sara Sharond6d517b2017-03-06 10:16:11 +02003213
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02003214 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003215
Johannes Bergf7f89e72014-08-05 15:24:44 +02003216 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
Johannes Bergb6658ff2013-07-24 13:55:51 +02003217 }
3218
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003219 return 0;
3220}
3221
Johannes Berg8ca151b2013-01-24 14:25:36 +01003222static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3223{
Johannes Berg2dc2a152015-06-16 17:09:18 +02003224 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003225
3226 lockdep_assert_held(&mvm->mutex);
3227
Johannes Berg2dc2a152015-06-16 17:09:18 +02003228 /* Pick the unused key offset with the highest 'deleted'
3229 * counter. Every time a key is deleted, all the counters
3230 * are incremented and the one that was just deleted is
3231 * reset to zero. Thus, the highest counter is the one
3232 * that was deleted longest ago. Pick that one.
3233 */
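	/*
	 * Worked example (assumed counters): with fw_key_deleted =
	 * {5, 0, 9, 2} and offsets 1-3 currently unused, offset 2 wins
	 * (counter 9, deleted longest ago), giving a least-recently-freed
	 * allocation without having to store timestamps.
	 */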
3234 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3235 if (test_bit(i, mvm->fw_key_table))
3236 continue;
3237 if (mvm->fw_key_deleted[i] > max) {
3238 max = mvm->fw_key_deleted[i];
3239 max_offs = i;
3240 }
3241 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003242
Johannes Berg2dc2a152015-06-16 17:09:18 +02003243 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003244 return STA_KEY_IDX_INVALID;
3245
Johannes Berg2dc2a152015-06-16 17:09:18 +02003246 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003247}
3248
Johannes Berg5f7a1842015-12-11 09:36:10 +01003249static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3250 struct ieee80211_vif *vif,
3251 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003252{
Johannes Berg5b530e92014-12-23 16:00:17 +01003253 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003254
Johannes Berg5f7a1842015-12-11 09:36:10 +01003255 if (sta)
3256 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003257
3258 /*
3259 * The device expects GTKs for station interfaces to be
3260 * installed as GTKs for the AP station. If we have no
3261 * station ID, then use AP's station ID.
3262 */
3263 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003264 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Avri Altman9513c5e2015-10-19 16:29:11 +02003265 u8 sta_id = mvmvif->ap_sta_id;
3266
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003267 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3268 lockdep_is_held(&mvm->mutex));
3269
Avri Altman9513c5e2015-10-19 16:29:11 +02003270 /*
3271 * It is possible that the 'sta' parameter is NULL,
3272 * for example when a GTK is removed - the sta_id will then
3273 * be the AP ID, and no station was passed by mac80211.
3274 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003275 if (IS_ERR_OR_NULL(sta))
3276 return NULL;
3277
3278 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02003279 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003280
Johannes Berg5f7a1842015-12-11 09:36:10 +01003281 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003282}
3283
3284static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
David Spinadel85aeb582017-03-30 19:43:53 +03003285 u32 sta_id,
Sara Sharon45c458b2016-11-09 15:43:26 +02003286 struct ieee80211_key_conf *key, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003287 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003288 u8 key_offset, bool mfp)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003289{
Sara Sharon45c458b2016-11-09 15:43:26 +02003290 union {
3291 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3292 struct iwl_mvm_add_sta_key_cmd cmd;
3293 } u = {};
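	/*
	 * Only the common prefix of the two command layouts is shared via
	 * u.cmd.common below; 'size' is picked at the end according to
	 * whether the firmware advertises the TKIP_MIC_KEYS API (new_api).
	 */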
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003294 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01003295 int ret;
3296 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003297 u16 keyidx;
Sara Sharon45c458b2016-11-09 15:43:26 +02003298 u64 pn = 0;
3299 int i, size;
3300 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3301 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003302
David Spinadel85aeb582017-03-30 19:43:53 +03003303 if (sta_id == IWL_MVM_INVALID_STA)
3304 return -EINVAL;
3305
Sara Sharon45c458b2016-11-09 15:43:26 +02003306 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
Johannes Berg8ca151b2013-01-24 14:25:36 +01003307 STA_KEY_FLG_KEYID_MSK;
3308 key_flags = cpu_to_le16(keyidx);
3309 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3310
Sara Sharon45c458b2016-11-09 15:43:26 +02003311 switch (key->cipher) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003312 case WLAN_CIPHER_SUITE_TKIP:
3313 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003314 if (new_api) {
3315 memcpy((void *)&u.cmd.tx_mic_key,
3316 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3317 IWL_MIC_KEY_SIZE);
3318
3319 memcpy((void *)&u.cmd.rx_mic_key,
3320 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3321 IWL_MIC_KEY_SIZE);
3322 pn = atomic64_read(&key->tx_pn);
3323
3324 } else {
3325 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3326 for (i = 0; i < 5; i++)
3327 u.cmd_v1.tkip_rx_ttak[i] =
3328 cpu_to_le16(tkip_p1k[i]);
3329 }
3330 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003331 break;
3332 case WLAN_CIPHER_SUITE_CCMP:
3333 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Sara Sharon45c458b2016-11-09 15:43:26 +02003334 memcpy(u.cmd.common.key, key->key, key->keylen);
3335 if (new_api)
3336 pn = atomic64_read(&key->tx_pn);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003337 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01003338 case WLAN_CIPHER_SUITE_WEP104:
3339 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05003340 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01003341 case WLAN_CIPHER_SUITE_WEP40:
3342 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003343 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
Johannes Bergba3943b2014-11-12 23:54:48 +01003344 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03003345 case WLAN_CIPHER_SUITE_GCMP_256:
3346 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3347 /* fall through */
3348 case WLAN_CIPHER_SUITE_GCMP:
3349 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003350 memcpy(u.cmd.common.key, key->key, key->keylen);
3351 if (new_api)
3352 pn = atomic64_read(&key->tx_pn);
Ayala Beker2a53d162016-04-07 16:21:57 +03003353 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003354 default:
Max Stepanove36e5432013-08-27 19:56:13 +03003355 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
Sara Sharon45c458b2016-11-09 15:43:26 +02003356 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003357 }
3358
Johannes Bergba3943b2014-11-12 23:54:48 +01003359 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003360 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003361 if (mfp)
3362 key_flags |= cpu_to_le16(STA_KEY_MFP);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003363
Sara Sharon45c458b2016-11-09 15:43:26 +02003364 u.cmd.common.key_offset = key_offset;
3365 u.cmd.common.key_flags = key_flags;
David Spinadel85aeb582017-03-30 19:43:53 +03003366 u.cmd.common.sta_id = sta_id;
Sara Sharon45c458b2016-11-09 15:43:26 +02003367
3368 if (new_api) {
3369 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3370 size = sizeof(u.cmd);
3371 } else {
3372 size = sizeof(u.cmd_v1);
3373 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003374
3375 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003376 if (cmd_flags & CMD_ASYNC)
Sara Sharon45c458b2016-11-09 15:43:26 +02003377 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3378 &u.cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003379 else
Sara Sharon45c458b2016-11-09 15:43:26 +02003380 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3381 &u.cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003382
3383 switch (status) {
3384 case ADD_STA_SUCCESS:
3385 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3386 break;
3387 default:
3388 ret = -EIO;
3389 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3390 break;
3391 }
3392
3393 return ret;
3394}
3395
3396static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3397 struct ieee80211_key_conf *keyconf,
3398 u8 sta_id, bool remove_key)
3399{
3400 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3401
3402 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03003403 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3404 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3405 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3406 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3407 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3408 return -EINVAL;
3409
3410 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3411 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01003412 return -EINVAL;
3413
3414 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3415 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3416
3417 if (remove_key) {
3418 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3419 } else {
3420 struct ieee80211_key_seq seq;
3421 const u8 *pn;
3422
Ayala Bekeraa950522016-06-01 00:28:09 +03003423 switch (keyconf->cipher) {
3424 case WLAN_CIPHER_SUITE_AES_CMAC:
3425 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3426 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03003427 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3428 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3429 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3430 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03003431 default:
3432 return -EINVAL;
3433 }
3434
Ayala Beker8e160ab2016-04-11 11:37:38 +03003435 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3436 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3437 igtk_cmd.ctrl_flags |=
3438 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003439 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3440 pn = seq.aes_cmac.pn;
3441 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3442 ((u64) pn[4] << 8) |
3443 ((u64) pn[3] << 16) |
3444 ((u64) pn[2] << 24) |
3445 ((u64) pn[1] << 32) |
3446 ((u64) pn[0] << 40));
3447 }
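	/*
	 * Note on the packing above: mac80211 hands over the 6-byte IPN
	 * big-endian (pn[0] is the most significant byte); the shifts
	 * rebuild it as a 48-bit integer with pn[5] as the least
	 * significant byte before the little-endian conversion.
	 */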
3448
3449 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3450 remove_key ? "removing" : "installing",
3451 igtk_cmd.sta_id);
3452
Ayala Beker8e160ab2016-04-11 11:37:38 +03003453 if (!iwl_mvm_has_new_rx_api(mvm)) {
3454 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3455 .ctrl_flags = igtk_cmd.ctrl_flags,
3456 .key_id = igtk_cmd.key_id,
3457 .sta_id = igtk_cmd.sta_id,
3458 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3459 };
3460
3461 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3462 ARRAY_SIZE(igtk_cmd_v1.igtk));
3463 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3464 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3465 }
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003466 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003467 sizeof(igtk_cmd), &igtk_cmd);
3468}
3469
3470
3471static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3472 struct ieee80211_vif *vif,
3473 struct ieee80211_sta *sta)
3474{
Johannes Berg5b530e92014-12-23 16:00:17 +01003475 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003476
3477 if (sta)
3478 return sta->addr;
3479
3480 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003481 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003482 u8 sta_id = mvmvif->ap_sta_id;
3483 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3484 lockdep_is_held(&mvm->mutex));
3485 return sta->addr;
3486 }
3487
3488
3489 return NULL;
3490}
3491
Johannes Berg2f6319d2014-11-12 23:39:56 +01003492static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3493 struct ieee80211_vif *vif,
3494 struct ieee80211_sta *sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01003495 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003496 u8 key_offset,
Johannes Bergba3943b2014-11-12 23:54:48 +01003497 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003498{
Johannes Berg8ca151b2013-01-24 14:25:36 +01003499 int ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003500 const u8 *addr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003501 struct ieee80211_key_seq seq;
3502 u16 p1k[5];
David Spinadel85aeb582017-03-30 19:43:53 +03003503 u32 sta_id;
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003504 bool mfp = false;
David Spinadel85aeb582017-03-30 19:43:53 +03003505
3506 if (sta) {
3507 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3508
3509 sta_id = mvm_sta->sta_id;
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003510 mfp = sta->mfp;
David Spinadel85aeb582017-03-30 19:43:53 +03003511 } else if (vif->type == NL80211_IFTYPE_AP &&
3512 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3513 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3514
3515 sta_id = mvmvif->mcast_sta.sta_id;
3516 } else {
3517 IWL_ERR(mvm, "Failed to find station id\n");
3518 return -EINVAL;
3519 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003520
Johannes Berg8ca151b2013-01-24 14:25:36 +01003521 switch (keyconf->cipher) {
3522 case WLAN_CIPHER_SUITE_TKIP:
3523 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3524 /* get phase 1 key from mac80211 */
3525 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3526 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
David Spinadel85aeb582017-03-30 19:43:53 +03003527 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003528 seq.tkip.iv32, p1k, 0, key_offset,
3529 mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003530 break;
3531 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergba3943b2014-11-12 23:54:48 +01003532 case WLAN_CIPHER_SUITE_WEP40:
3533 case WLAN_CIPHER_SUITE_WEP104:
Ayala Beker2a53d162016-04-07 16:21:57 +03003534 case WLAN_CIPHER_SUITE_GCMP:
3535 case WLAN_CIPHER_SUITE_GCMP_256:
David Spinadel85aeb582017-03-30 19:43:53 +03003536 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003537 0, NULL, 0, key_offset, mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003538 break;
3539 default:
David Spinadel85aeb582017-03-30 19:43:53 +03003540 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003541 0, NULL, 0, key_offset, mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003542 }
3543
Johannes Berg8ca151b2013-01-24 14:25:36 +01003544 return ret;
3545}
3546
Johannes Berg2f6319d2014-11-12 23:39:56 +01003547static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
Johannes Bergba3943b2014-11-12 23:54:48 +01003548 struct ieee80211_key_conf *keyconf,
3549 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003550{
Sara Sharon45c458b2016-11-09 15:43:26 +02003551 union {
3552 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3553 struct iwl_mvm_add_sta_key_cmd cmd;
3554 } u = {};
3555 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3556 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003557 __le16 key_flags;
Sara Sharon45c458b2016-11-09 15:43:26 +02003558 int ret, size;
Johannes Berg79920742014-11-03 15:43:04 +01003559 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003560
Sara Sharone4f13ad2018-01-15 13:50:59 +02003561 /* This is a valid situation for GTK removal */
David Spinadel85aeb582017-03-30 19:43:53 +03003562 if (sta_id == IWL_MVM_INVALID_STA)
Sara Sharone4f13ad2018-01-15 13:50:59 +02003563 return 0;
David Spinadel85aeb582017-03-30 19:43:53 +03003564
Emmanuel Grumbach8115efb2013-02-05 10:08:35 +02003565 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3566 STA_KEY_FLG_KEYID_MSK);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003567 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3568 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3569
Johannes Bergba3943b2014-11-12 23:54:48 +01003570 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003571 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3572
Sara Sharon45c458b2016-11-09 15:43:26 +02003573 /*
3574 * The fields assigned here are in the same location at the start
3575 * of the command, so we can do this union trick.
3576 */
3577 u.cmd.common.key_flags = key_flags;
3578 u.cmd.common.key_offset = keyconf->hw_key_idx;
3579 u.cmd.common.sta_id = sta_id;
3580
3581 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003582
Johannes Berg8ca151b2013-01-24 14:25:36 +01003583 status = ADD_STA_SUCCESS;
Sara Sharon45c458b2016-11-09 15:43:26 +02003584 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3585 &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003586
3587 switch (status) {
3588 case ADD_STA_SUCCESS:
3589 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3590 break;
3591 default:
3592 ret = -EIO;
3593 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3594 break;
3595 }
3596
3597 return ret;
3598}
3599
Johannes Berg2f6319d2014-11-12 23:39:56 +01003600int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3601 struct ieee80211_vif *vif,
3602 struct ieee80211_sta *sta,
3603 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003604 u8 key_offset)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003605{
Johannes Bergba3943b2014-11-12 23:54:48 +01003606 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003607 struct iwl_mvm_sta *mvm_sta;
David Spinadel85aeb582017-03-30 19:43:53 +03003608 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003609 int ret;
Matti Gottlieb11828db2015-06-01 15:15:11 +03003610 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
Johannes Berg2f6319d2014-11-12 23:39:56 +01003611
3612 lockdep_assert_held(&mvm->mutex);
3613
David Spinadel85aeb582017-03-30 19:43:53 +03003614 if (vif->type != NL80211_IFTYPE_AP ||
3615 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3616 /* Get the station id from the mvm local station table */
3617 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3618 if (!mvm_sta) {
3619 IWL_ERR(mvm, "Failed to find station\n");
Johannes Berg2f6319d2014-11-12 23:39:56 +01003620 return -EINVAL;
3621 }
David Spinadel85aeb582017-03-30 19:43:53 +03003622 sta_id = mvm_sta->sta_id;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003623
David Spinadel85aeb582017-03-30 19:43:53 +03003624 /*
3625 * It is possible that the 'sta' parameter is NULL, and thus
Beni Leve829b172018-02-20 13:41:54 +02003626 * there is a need to retrieve the sta from the local station
David Spinadel85aeb582017-03-30 19:43:53 +03003627 * table.
3628 */
3629 if (!sta) {
3630 sta = rcu_dereference_protected(
3631 mvm->fw_id_to_mac_id[sta_id],
3632 lockdep_is_held(&mvm->mutex));
3633 if (IS_ERR_OR_NULL(sta)) {
3634 IWL_ERR(mvm, "Invalid station id\n");
3635 return -EINVAL;
3636 }
3637 }
3638
3639 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3640 return -EINVAL;
Beni Leve829b172018-02-20 13:41:54 +02003641 } else {
3642 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3643
3644 sta_id = mvmvif->mcast_sta.sta_id;
3645 }
3646
3647 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3648 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3649 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3650 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3651 goto end;
David Spinadel85aeb582017-03-30 19:43:53 +03003652 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003653
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003654 /* If the key_offset is not pre-assigned, we need to find a
3655 * new offset to use. In normal cases, the offset is not
3656 * pre-assigned, but during HW_RESTART we want to reuse the
3657 * same indices, so we pass them when this function is called.
3658 *
3659	 * In D3 entry, we need to hardcode the indices (because the
3660 * firmware hardcodes the PTK offset to 0). In this case, we
3661 * need to make sure we don't overwrite the hw_key_idx in the
3662 * keyconf structure, because otherwise we cannot configure
3663 * the original ones back when resuming.
3664 */
3665 if (key_offset == STA_KEY_IDX_INVALID) {
3666 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3667 if (key_offset == STA_KEY_IDX_INVALID)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003668 return -ENOSPC;
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003669 keyconf->hw_key_idx = key_offset;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003670 }
3671
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003672 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003673 if (ret)
Johannes Bergba3943b2014-11-12 23:54:48 +01003674 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003675
3676 /*
3677 * For WEP, the same key is used for multicast and unicast. Upload it
3678 * again, using the same key offset, and now pointing the other one
3679 * to the same key slot (offset).
3680 * If this fails, remove the original as well.
3681 */
David Spinadel85aeb582017-03-30 19:43:53 +03003682 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3683 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3684 sta) {
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003685 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3686 key_offset, !mcast);
Johannes Bergba3943b2014-11-12 23:54:48 +01003687 if (ret) {
Johannes Bergba3943b2014-11-12 23:54:48 +01003688 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003689 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003690 }
3691 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003692
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003693 __set_bit(key_offset, mvm->fw_key_table);
3694
Johannes Berg2f6319d2014-11-12 23:39:56 +01003695end:
3696 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3697 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
Matti Gottlieb11828db2015-06-01 15:15:11 +03003698 sta ? sta->addr : zero_addr, ret);
Johannes Berg2f6319d2014-11-12 23:39:56 +01003699 return ret;
3700}
3701
3702int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3703 struct ieee80211_vif *vif,
3704 struct ieee80211_sta *sta,
3705 struct ieee80211_key_conf *keyconf)
3706{
Johannes Bergba3943b2014-11-12 23:54:48 +01003707 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003708 struct iwl_mvm_sta *mvm_sta;
Sara Sharon0ae98812017-01-04 14:53:58 +02003709 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2dc2a152015-06-16 17:09:18 +02003710 int ret, i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003711
3712 lockdep_assert_held(&mvm->mutex);
3713
Johannes Berg5f7a1842015-12-11 09:36:10 +01003714 /* Get the station from the mvm local station table */
3715 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
Luca Coelho71793b7d2017-03-30 12:04:47 +03003716 if (mvm_sta)
3717 sta_id = mvm_sta->sta_id;
David Spinadel85aeb582017-03-30 19:43:53 +03003718 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3719 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3720
Johannes Berg2f6319d2014-11-12 23:39:56 +01003721
3722 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3723 keyconf->keyidx, sta_id);
3724
Luca Coelho71793b7d2017-03-30 12:04:47 +03003725 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3726 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3727 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
Johannes Berg2f6319d2014-11-12 23:39:56 +01003728 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3729
3730 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3731 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3732 keyconf->hw_key_idx);
3733 return -ENOENT;
3734 }
3735
Johannes Berg2dc2a152015-06-16 17:09:18 +02003736 /* track which key was deleted last */
3737 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3738 if (mvm->fw_key_deleted[i] < U8_MAX)
3739 mvm->fw_key_deleted[i]++;
3740 }
3741 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3742
David Spinadel85aeb582017-03-30 19:43:53 +03003743 if (sta && !mvm_sta) {
Johannes Berg2f6319d2014-11-12 23:39:56 +01003744 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3745 return 0;
3746 }
3747
Johannes Bergba3943b2014-11-12 23:54:48 +01003748 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3749 if (ret)
3750 return ret;
3751
3752 /* delete WEP key twice to get rid of (now useless) offset */
3753 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3754 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3755 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3756
3757 return ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003758}
3759
Johannes Berg8ca151b2013-01-24 14:25:36 +01003760void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3761 struct ieee80211_vif *vif,
3762 struct ieee80211_key_conf *keyconf,
3763 struct ieee80211_sta *sta, u32 iv32,
3764 u16 *phase1key)
3765{
Beni Levc3eb5362013-02-06 17:22:18 +02003766 struct iwl_mvm_sta *mvm_sta;
Johannes Bergba3943b2014-11-12 23:54:48 +01003767 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003768 bool mfp = sta ? sta->mfp : false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003769
Beni Levc3eb5362013-02-06 17:22:18 +02003770 rcu_read_lock();
3771
Johannes Berg5f7a1842015-12-11 09:36:10 +01003772 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3773 if (WARN_ON_ONCE(!mvm_sta))
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003774 goto unlock;
David Spinadel85aeb582017-03-30 19:43:53 +03003775 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003776 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3777 mfp);
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003778
3779 unlock:
Beni Levc3eb5362013-02-06 17:22:18 +02003780 rcu_read_unlock();
Johannes Berg8ca151b2013-01-24 14:25:36 +01003781}
3782
Johannes Berg9cc40712013-02-15 22:47:48 +01003783void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3784 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003785{
Johannes Berg5b577a92013-11-14 18:20:04 +01003786 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003787 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003788 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003789 .sta_id = mvmsta->sta_id,
Emmanuel Grumbach5af01772013-06-09 12:59:24 +03003790 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
Johannes Berg9cc40712013-02-15 22:47:48 +01003791 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003792 };
3793 int ret;
3794
Sara Sharon854c5702016-01-26 13:17:47 +02003795 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3796 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003797 if (ret)
3798 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3799}
3800
Johannes Berg9cc40712013-02-15 22:47:48 +01003801void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3802 struct ieee80211_sta *sta,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003803 enum ieee80211_frame_release_type reason,
Johannes Berg3e56ead2013-02-15 22:23:18 +01003804 u16 cnt, u16 tids, bool more_data,
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003805 bool single_sta_queue)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003806{
Johannes Berg5b577a92013-11-14 18:20:04 +01003807 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003808 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003809 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003810 .sta_id = mvmsta->sta_id,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003811 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3812 .sleep_tx_count = cpu_to_le16(cnt),
Johannes Berg9cc40712013-02-15 22:47:48 +01003813 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003814 };
Johannes Berg3e56ead2013-02-15 22:23:18 +01003815 int tid, ret;
3816 unsigned long _tids = tids;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003817
Johannes Berg3e56ead2013-02-15 22:23:18 +01003818 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3819 * Note that this field is reserved and unused by firmware not
3820 * supporting GO uAPSD, so it's safe to always do this.
3821 */
3822 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3823 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3824
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003825	/* If we're releasing frames from aggregation or dqa queues, then check
3826	 * if all the queues that we're releasing frames from, combined, have:
3827	 * - more frames than fit in the service period, in which case more_data
3828 * needs to be set
3829 * - fewer than 'cnt' frames, in which case we need to adjust the
3830 * firmware command (but do that unconditionally)
3831 */
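	/*
	 * Worked example (assumed numbers): with cnt = 4 and two TIDs
	 * holding 3 and 5 queued frames, the loop below consumes 3
	 * (remaining = 1), then sees 5 > 1, so it sets more_data and zeroes
	 * remaining; sleep_tx_count ends up as 4 and the peer learns that
	 * frames remain buffered after this service period.
	 */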
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003832 if (single_sta_queue) {
Johannes Berg3e56ead2013-02-15 22:23:18 +01003833 int remaining = cnt;
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003834 int sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003835
3836 spin_lock_bh(&mvmsta->lock);
3837 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3838 struct iwl_mvm_tid_data *tid_data;
3839 u16 n_queued;
3840
3841 tid_data = &mvmsta->tid_data[tid];
Johannes Berg3e56ead2013-02-15 22:23:18 +01003842
Liad Kaufmandd321622017-04-05 16:25:11 +03003843 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003844 if (n_queued > remaining) {
3845 more_data = true;
3846 remaining = 0;
3847 break;
3848 }
3849 remaining -= n_queued;
3850 }
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003851 sleep_tx_count = cnt - remaining;
3852 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3853 mvmsta->sleep_tx_count = sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003854 spin_unlock_bh(&mvmsta->lock);
3855
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003856 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003857 if (WARN_ON(cnt - remaining == 0)) {
3858 ieee80211_sta_eosp(sta);
3859 return;
3860 }
3861 }
3862
3863 /* Note: this is ignored by firmware not supporting GO uAPSD */
3864 if (more_data)
Sara Sharonced19f22017-02-06 19:09:32 +02003865 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003866
3867 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3868 mvmsta->next_status_eosp = true;
Sara Sharonced19f22017-02-06 19:09:32 +02003869 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003870 } else {
Sara Sharonced19f22017-02-06 19:09:32 +02003871 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003872 }
3873
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +02003874 /* block the Tx queues until the FW updated the sleep Tx count */
3875 iwl_trans_block_txq_ptrs(mvm->trans, true);
3876
3877 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3878 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
Sara Sharon854c5702016-01-26 13:17:47 +02003879 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003880 if (ret)
3881 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3882}
Johannes Berg3e56ead2013-02-15 22:23:18 +01003883
Johannes Berg04168412015-06-23 21:22:09 +02003884void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3885 struct iwl_rx_cmd_buffer *rxb)
Johannes Berg3e56ead2013-02-15 22:23:18 +01003886{
3887 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3888 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3889 struct ieee80211_sta *sta;
3890 u32 sta_id = le32_to_cpu(notif->sta_id);
3891
3892 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
Johannes Berg04168412015-06-23 21:22:09 +02003893 return;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003894
3895 rcu_read_lock();
3896 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3897 if (!IS_ERR_OR_NULL(sta))
3898 ieee80211_sta_eosp(sta);
3899 rcu_read_unlock();
Johannes Berg3e56ead2013-02-15 22:23:18 +01003900}
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003901
3902void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3903 struct iwl_mvm_sta *mvmsta, bool disable)
3904{
3905 struct iwl_mvm_add_sta_cmd cmd = {
3906 .add_modify = STA_MODE_MODIFY,
3907 .sta_id = mvmsta->sta_id,
3908 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3909 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3910 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3911 };
3912 int ret;
3913
Sara Sharon854c5702016-01-26 13:17:47 +02003914 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3915 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003916 if (ret)
3917 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3918}
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003919
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

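/*
 * Variant of iwl_mvm_sta_modify_disable_tx() for internal (firmware-only)
 * stations such as the per-vif broadcast/multicast stations, which have no
 * mac80211 representation. Note that the command is sent synchronously
 * (flags = 0) here, since this runs under mvm->mutex in a sleepable
 * context, whereas the per-station helper above must use CMD_ASYNC because
 * it can be called under mvm_sta->lock.
 */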
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

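/*
 * Quiet or un-quiet every station that belongs to the given vif. This is
 * used around channel-switch announcements on the AP/GO side, where all
 * clients must be silenced until the switch completes.
 */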
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* The multicast station also needs to be blocked/unblocked */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station: the FW blocks it for the
	 * immediate quiet period on its own, so the driver never blocks it.
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

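/*
 * Client-side counterpart of the CSA quieting above: on a station vif,
 * mvmvif->ap_sta_id is the AP's station entry, so this stops Tx towards
 * the AP while it is away switching channels.
 */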
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW the next_reclaimed index is only 8 bits wide, so
	 * align the wrap-around of the SSN to make the two values
	 * comparable.
	 */
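	/*
	 * Illustrative example (made-up values): if seq_number corresponds
	 * to sn == 0x112 while the firmware's 8-bit next_reclaimed has
	 * wrapped to 0x0f, masking gives sn == 0x12, and
	 * ieee80211_sn_sub(0x12, 0x0f) == 3, i.e. three frames are still
	 * queued, matching what full-width counters would have returned.
	 */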
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}