/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);

/*
 * Newer versions of the ADD_STA command added new fields at the end of
 * the structure, so sending the size of the relevant API's structure is
 * enough to support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
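
/*
 * Illustrative note (not part of the original file): callers use the
 * helper above as the payload length for the ADD_STA host command, e.g.
 *
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *					  iwl_mvm_add_sta_cmd_size(mvm),
 *					  &add_sta_cmd, &status);
 *
 * so firmware that only understands the older v7 layout simply receives
 * the shorter structure.
 */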

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
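		/*
		 * Descriptive note (assumption, not from the original
		 * source): the low nibble built above holds the U-APSD
		 * enabled ACs; mirroring it into the high nibble below
		 * marks the same ACs in the second nibble of the field
		 * that the firmware expects.
		 */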
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
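
/*
 * Illustrative note (assumption, not in the original file): the session
 * timer checked above is expected to be armed by the RX BA session setup
 * path with roughly
 *
 *	mod_timer(&baid_data->session_timer,
 *		  TU_TO_EXP_TIME(baid_data->timeout * 2));
 *
 * so it only fires after two BA timeout intervals without RX traffic.
 */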

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
	int ret;

	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
		return -EINVAL;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);

		if (remove_mac_queue)
			mvm->hw_queue_to_mac80211[queue] &=
				~BIT(mac80211_queue);

		spin_unlock_bh(&mvm->queue_info_lock);

		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC queue
	 * from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->hw_queue_to_mac80211[queue] &=
			~BIT(mac80211_queue);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap ||
	     mvm->hw_queue_to_mac80211[queue],
	     "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
	     queue, mvm->hw_queue_to_mac80211[queue],
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->hw_queue_to_mac80211[queue] = 0;

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[queue].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */
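	/*
	 * Worked example (illustrative, not from the original file): if
	 * the STA already owns BK and VI data queues and a new VO TID
	 * needs one, rule 1 fails (no BE queue), rule 2 fails (no VO
	 * queue), and rule 3a below selects the VI queue.
	 */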

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
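	/*
	 * For reference (mac80211 definition, not in the original file):
	 * IEEE80211_AC_VO == 0, IEEE80211_AC_VI == 1,
	 * IEEE80211_AC_BE == 2, IEEE80211_AC_BK == 3.
	 */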
707 spin_lock_bh(&mvm->queue_info_lock);
708 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
709 spin_unlock_bh(&mvm->queue_info_lock);
710
711 IWL_DEBUG_TX_QUEUES(mvm,
712 "No redirection needed on TXQ #%d\n",
713 queue);
714 return 0;
715 }
716
717 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
718 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
Liad Kaufmanedbe9612016-02-02 15:43:32 +0200719 cmd.tid = mvm->queue_info[queue].txq_tid;
Sara Sharon34e10862017-02-23 13:15:07 +0200720 mq = mvm->hw_queue_to_mac80211[queue];
Johannes Berg1c140892018-07-04 11:58:28 +0200721 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
Liad Kaufman58f2cc52015-09-30 16:44:28 +0200722 spin_unlock_bh(&mvm->queue_info_lock);
723
Liad Kaufman9f9af3d2015-12-23 16:03:46 +0200724 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
Liad Kaufman58f2cc52015-09-30 16:44:28 +0200725 queue, iwl_mvm_ac_to_tx_fifo[ac]);
726
727 /* Stop MAC queues and wait for this queue to empty */
728 iwl_mvm_stop_mac_queues(mvm, mq);
Sara Sharona1a57872017-03-05 11:38:58 +0200729 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
Liad Kaufman58f2cc52015-09-30 16:44:28 +0200730 if (ret) {
731 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
732 queue);
733 ret = -EIO;
734 goto out;
735 }
736
737 /* Before redirecting the queue we need to de-activate it */
738 iwl_trans_txq_disable(mvm->trans, queue, false);
739 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
740 if (ret)
741 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
742 ret);
743
744 /* Make sure the SCD wrptr is correctly set before reconfiguring */
Sara Sharonca3b9c62016-06-30 16:14:02 +0300745 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
Liad Kaufman58f2cc52015-09-30 16:44:28 +0200746
Liad Kaufmanedbe9612016-02-02 15:43:32 +0200747 /* Update the TID "owner" of the queue */
748 spin_lock_bh(&mvm->queue_info_lock);
749 mvm->queue_info[queue].txq_tid = tid;
750 spin_unlock_bh(&mvm->queue_info_lock);
751
Liad Kaufman58f2cc52015-09-30 16:44:28 +0200752 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
753
754 /* Redirect to lower AC */
755 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
Sara Sharon0ec9257b2017-10-16 09:45:10 +0300756 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
Liad Kaufman58f2cc52015-09-30 16:44:28 +0200757
758 /* Update AC marking of the queue */
759 spin_lock_bh(&mvm->queue_info_lock);
760 mvm->queue_info[queue].mac80211_ac = ac;
761 spin_unlock_bh(&mvm->queue_info_lock);
762
763 /*
764 * Mark queue as shared in transport if shared
765 * Note this has to be done after queue enablement because enablement
766 * can also set this value, and there is no indication there to shared
767 * queues
768 */
769 if (shared_queue)
770 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
771
772out:
773 /* Continue using the MAC queues */
774 iwl_mvm_start_mac_queues(mvm, mq);
775
776 return ret;
777}
778
Johannes Berg99448a82018-07-04 11:38:34 +0200779static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
780 u8 minq, u8 maxq)
781{
782 int i;
783
784 lockdep_assert_held(&mvm->queue_info_lock);
785
786 /* This should not be hit with new TX path */
787 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
788 return -ENOSPC;
789
790 /* Start by looking for a free queue */
791 for (i = minq; i <= maxq; i++)
Johannes Berg1c140892018-07-04 11:58:28 +0200792 if (mvm->queue_info[i].tid_bitmap == 0 &&
Johannes Berg99448a82018-07-04 11:38:34 +0200793 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
794 return i;
795
796 return -ENOSPC;
797}
798
799static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
800 u8 sta_id, u8 tid, unsigned int timeout)
801{
802 int queue, size = IWL_DEFAULT_QUEUE_SIZE;
803
804 if (tid == IWL_MAX_TID_COUNT) {
805 tid = IWL_MGMT_TID;
806 size = IWL_MGMT_QUEUE_SIZE;
807 }
808 queue = iwl_trans_txq_alloc(mvm->trans,
809 cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
810 sta_id, tid, SCD_QUEUE_CFG, size, timeout);
811
812 if (queue < 0) {
813 IWL_DEBUG_TX_QUEUES(mvm,
814 "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
815 sta_id, tid, queue);
816 return queue;
817 }
818
819 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
820 queue, sta_id, tid);
821
822 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
823 IWL_DEBUG_TX_QUEUES(mvm,
824 "Enabling TXQ #%d (mac80211 map:0x%x)\n",
825 queue, mvm->hw_queue_to_mac80211[queue]);
826
827 return queue;
828}
829
Sara Sharon310181e2017-01-17 14:27:48 +0200830static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
831 struct ieee80211_sta *sta, u8 ac,
832 int tid)
833{
834 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
835 unsigned int wdg_timeout =
836 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
837 u8 mac_queue = mvmsta->vif->hw_queue[ac];
838 int queue = -1;
839
840 lockdep_assert_held(&mvm->mutex);
841
842 IWL_DEBUG_TX_QUEUES(mvm,
843 "Allocating queue for sta %d on tid %d\n",
844 mvmsta->sta_id, tid);
845 queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
846 wdg_timeout);
847 if (queue < 0)
848 return queue;
849
850 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
851
852 spin_lock_bh(&mvmsta->lock);
853 mvmsta->tid_data[tid].txq_id = queue;
854 mvmsta->tid_data[tid].is_tid_active = true;
Sara Sharon310181e2017-01-17 14:27:48 +0200855 spin_unlock_bh(&mvmsta->lock);
856
Sara Sharon310181e2017-01-17 14:27:48 +0200857 return 0;
858}
859
Johannes Berg99448a82018-07-04 11:38:34 +0200860static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
861 int mac80211_queue, u8 sta_id, u8 tid)
862{
863 bool enable_queue = true;
864
865 spin_lock_bh(&mvm->queue_info_lock);
866
867 /* Make sure this TID isn't already enabled */
868 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
869 spin_unlock_bh(&mvm->queue_info_lock);
870 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
871 queue, tid);
872 return false;
873 }
874
875 /* Update mappings and refcounts */
Johannes Berg1c140892018-07-04 11:58:28 +0200876 if (mvm->queue_info[queue].tid_bitmap)
Johannes Berg99448a82018-07-04 11:38:34 +0200877 enable_queue = false;
878
879 if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
880 WARN(mac80211_queue >=
881 BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
882 "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
883 mac80211_queue, queue, sta_id, tid);
884 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
885 }
886
Johannes Berg99448a82018-07-04 11:38:34 +0200887 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
888 mvm->queue_info[queue].ra_sta_id = sta_id;
889
890 if (enable_queue) {
891 if (tid != IWL_MAX_TID_COUNT)
892 mvm->queue_info[queue].mac80211_ac =
893 tid_to_mac80211_ac[tid];
894 else
895 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
896
897 mvm->queue_info[queue].txq_tid = tid;
898 }
899
900 IWL_DEBUG_TX_QUEUES(mvm,
Johannes Berg1c140892018-07-04 11:58:28 +0200901 "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
902 queue, mvm->queue_info[queue].tid_bitmap,
Johannes Berg99448a82018-07-04 11:38:34 +0200903 mvm->hw_queue_to_mac80211[queue]);
904
905 spin_unlock_bh(&mvm->queue_info_lock);
906
907 return enable_queue;
908}
909
910static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
911 int mac80211_queue, u16 ssn,
912 const struct iwl_trans_txq_scd_cfg *cfg,
913 unsigned int wdg_timeout)
914{
915 struct iwl_scd_txq_cfg_cmd cmd = {
916 .scd_queue = queue,
917 .action = SCD_CFG_ENABLE_QUEUE,
918 .window = cfg->frame_limit,
919 .sta_id = cfg->sta_id,
920 .ssn = cpu_to_le16(ssn),
921 .tx_fifo = cfg->fifo,
922 .aggregate = cfg->aggregate,
923 .tid = cfg->tid,
924 };
925 bool inc_ssn;
926
927 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
928 return false;
929
930 /* Send the enabling command if we need to */
931 if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
932 cfg->sta_id, cfg->tid))
933 return false;
934
935 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
936 NULL, wdg_timeout);
937 if (inc_ssn)
938 le16_add_cpu(&cmd.ssn, 1);
939
940 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
941 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
942
943 return inc_ssn;
944}
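
/*
 * Illustrative note (based on the caller later in this file): when the
 * function above returns true, the caller must bump both the frame's
 * sequence control and its tid_data sequence number by 0x10 so that the
 * driver's SSN stays in sync with the value programmed into the queue.
 */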

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
1265 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1266 }
1267 }
1268
1269 spin_lock_bh(&mvm->queue_info_lock);
1270 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1271 spin_unlock_bh(&mvm->queue_info_lock);
1272}
1273
Johannes Berg99448a82018-07-04 11:38:34 +02001274/*
1275 * Remove inactive TIDs of a given queue.
1276 * If all queue TIDs are inactive - mark the queue as inactive
1277 * If only some the queue TIDs are inactive - unmap them from the queue
1278 */
1279static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1280 struct iwl_mvm_sta *mvmsta, int queue,
1281 unsigned long tid_bitmap)
1282{
1283 int tid;
1284
1285 lockdep_assert_held(&mvmsta->lock);
1286 lockdep_assert_held(&mvm->queue_info_lock);
1287
1288 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1289 return;
1290
1291 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1292 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1293 /* If some TFDs are still queued - don't mark TID as inactive */
1294 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1295 tid_bitmap &= ~BIT(tid);
1296
1297 /* Don't mark as inactive any TID that has an active BA */
1298 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1299 tid_bitmap &= ~BIT(tid);
1300 }
1301
1302 /* If all TIDs in the queue are inactive - mark queue as inactive. */
1303 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1304 mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
1305
1306 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
1307 mvmsta->tid_data[tid].is_tid_active = false;
1308
1309 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
1310 queue);
1311 return;
1312 }
1313
1314 /*
1315 * If we are here, this is a shared queue and not all TIDs timed-out.
1316 * Remove the ones that did.
1317 */
1318 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1319 int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
1320
1321 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1322 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
Johannes Berg99448a82018-07-04 11:38:34 +02001323 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1324 mvmsta->tid_data[tid].is_tid_active = false;
1325
1326 IWL_DEBUG_TX_QUEUES(mvm,
1327 "Removing inactive TID %d from shared Q:%d\n",
1328 tid, queue);
1329 }
1330
1331 IWL_DEBUG_TX_QUEUES(mvm,
1332 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1333 mvm->queue_info[queue].tid_bitmap);
1334
1335 /*
1336 * There may be different TIDs with the same mac queues, so make
1337 * sure all TIDs have existing corresponding mac queues enabled
1338 */
1339 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1340 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1341 mvm->hw_queue_to_mac80211[queue] |=
1342 BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
1343 }
1344
1345 /* If the queue is marked as shared - "unshare" it */
Johannes Berg1c140892018-07-04 11:58:28 +02001346 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
Johannes Berg99448a82018-07-04 11:38:34 +02001347 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1348 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
1349 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1350 queue);
1351 }
1352}
1353
1354static void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
1355{
1356 unsigned long timeout_queues_map = 0;
1357 unsigned long now = jiffies;
1358 int i;
1359
1360 if (iwl_mvm_has_new_tx_api(mvm))
1361 return;
1362
1363 spin_lock_bh(&mvm->queue_info_lock);
Johannes Berg1c140892018-07-04 11:58:28 +02001364 /* skip the CMD queue */
1365 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1366 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1367 if (mvm->queue_info[i].tid_bitmap)
Johannes Berg99448a82018-07-04 11:38:34 +02001368 timeout_queues_map |= BIT(i);
Johannes Berg1c140892018-07-04 11:58:28 +02001369 }
Johannes Berg99448a82018-07-04 11:38:34 +02001370 spin_unlock_bh(&mvm->queue_info_lock);
1371
1372 rcu_read_lock();
1373
1374 /*
1375 * If a queue times out - mark it as INACTIVE (don't remove right away
1376 * if we don't have to.) This is an optimization in case traffic comes
1377 * later, and we don't HAVE to use a currently-inactive queue
1378 */
1379 for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
1380 struct ieee80211_sta *sta;
1381 struct iwl_mvm_sta *mvmsta;
1382 u8 sta_id;
1383 int tid;
1384 unsigned long inactive_tid_bitmap = 0;
1385 unsigned long queue_tid_bitmap;
1386
1387 spin_lock_bh(&mvm->queue_info_lock);
1388 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1389
1390 /* If TXQ isn't in active use anyway - nothing to do here... */
1391 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1392 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
1393 spin_unlock_bh(&mvm->queue_info_lock);
1394 continue;
1395 }
1396
1397 /* Check to see if there are inactive TIDs on this queue */
1398 for_each_set_bit(tid, &queue_tid_bitmap,
1399 IWL_MAX_TID_COUNT + 1) {
1400 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1401 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1402 continue;
1403
			inactive_tid_bitmap |= BIT(tid);
		}
		spin_unlock_bh(&mvm->queue_info_lock);

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hasn't been served recently and
		 * is in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		spin_lock(&mvm->queue_info_lock);
		iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
					     inactive_tid_bitmap);
		spin_unlock(&mvm->queue_info_lock);
		spin_unlock_bh(&mvmsta->lock);
	}

	rcu_read_unlock();
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
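
/*
 * tid_to_mac80211_ac[] follows the standard 802.11 UP-to-AC mapping
 * (e.g. TIDs 0/3 -> AC_BE, 1/2 -> AC_BK, 4/5 -> AC_VI, 6/7 -> AC_VO);
 * the pseudo-TID IWL_MAX_TID_COUNT is reserved for management frames,
 * which go out on the VO queue.
 */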

static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and
			 * the HW sets it now, a FW reset will cause the seq
			 * num to start at 0 again; the driver needs to update
			 * it internally as well, so that it keeps in sync
			 * with the real value.
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
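
/*
 * Note: as with the other ADD_STA users in this file, the command is sent
 * synchronously and the firmware result comes back in @status; only the
 * bits covered by IWL_ADD_STA_STATUS_MASK carry the actual return code.
 */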

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}
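
	/*
	 * Worked example for the 0xffff sentinel above: the 802.11 sequence
	 * control field packs the 12-bit sequence number above a 4-bit
	 * fragment number, so the first unfragmented frame of a TID with
	 * seqno 0 and the retry bit set carries seq_ctrl 0x0000 - it can
	 * never equal 0xffff, since that would imply fragment number 15,
	 * and fragments never reach iwl_mvm_is_dup().
	 */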

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * If rs is registered with mac80211, then "add station" will be
	 * handled via the corresponding ops; otherwise we need to notify
	 * rate scaling here.
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
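
/*
 * Sketch of the drain dance performed by iwl_mvm_rm_sta() below: draining
 * is switched on, the station's queues are flushed and awaited, then it is
 * switched off again before the station is actually removed:
 *
 *	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 *	...flush the station's queues and wait for them to empty...
 *	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 */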

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
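
/*
 * A minimal sketch of the internal-station lifecycle (see
 * iwl_mvm_add_aux_sta() below for the real thing): reserve an ID, push the
 * station to firmware, and undo the allocation on failure:
 *
 *	ret = iwl_mvm_allocate_int_sta(mvm, &sta, qmask, iftype, type);
 *	if (!ret)
 *		ret = iwl_mvm_add_int_sta_common(mvm, &sta, addr, mac_id, 0);
 *	if (ret)
 *		iwl_mvm_dealloc_int_sta(mvm, &sta);
 */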

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
	}
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add a queue to a station unknown
	 * to firmware, so enable the queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add a queue to a station unknown
	 * to firmware, so enable the queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
			    IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add a queue to a station unknown
	 * to firmware, so enable the queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures.
 */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/*
 * Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC) {
		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
		mvmvif->cab_queue = vif->cab_queue;
	}

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    0,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);

	if (mvmvif->ap_wep_key) {
		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);

		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;

		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
					   mvmvif->ap_wep_key, 1, 0, NULL, 0,
					   key_offset, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16
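/*
 * New RX BA (aggregation) sessions past the limit above are refused with
 * -ENOSPC in iwl_mvm_sta_rx_agg() below.
 */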

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
};

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
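
		/*
		 * Worked example (entry size is illustrative): with 64-byte
		 * cache lines and 16-byte entries, a buf_size of 25 yields
		 * 25 * 16 = 400 bytes, rounded up by ALIGN() to 448, i.e.
		 * room for 28 entries per queue with no cache line straddling
		 * two queues.
		 */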
2672
Sara Sharon10b2b202016-03-20 16:23:41 +02002673 /*
2674 * Allocate here so if allocation fails we can bail out early
2675 * before starting the BA session in the firmware
2676 */
Sara Sharonb915c102016-03-23 16:32:02 +02002677 baid_data = kzalloc(sizeof(*baid_data) +
2678 mvm->trans->num_rx_queues *
Johannes Bergdfdddd92017-09-26 12:24:51 +02002679 reorder_buf_size,
Sara Sharonb915c102016-03-23 16:32:02 +02002680 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002681 if (!baid_data)
2682 return -ENOMEM;
Johannes Bergdfdddd92017-09-26 12:24:51 +02002683
2684 /*
2685 * This division is why we need the above BUILD_BUG_ON(),
2686 * if that doesn't hold then this will not be right.
2687 */
2688 baid_data->entries_per_queue =
2689 reorder_buf_size / sizeof(baid_data->entries[0]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002690 }
2691
Johannes Berg8ca151b2013-01-24 14:25:36 +01002692 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2693 cmd.sta_id = mvm_sta->sta_id;
2694 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002695 if (start) {
2696 cmd.add_immediate_ba_tid = (u8) tid;
2697 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Luca Coelho514c30692018-06-24 11:59:54 +03002698 cmd.rx_ba_window = cpu_to_le16(buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002699 } else {
2700 cmd.remove_immediate_ba_tid = (u8) tid;
2701 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002702 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2703 STA_MODIFY_REMOVE_BA_TID;
2704
2705 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002706 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2707 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002708 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002709 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002710 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002711
Sara Sharon837c4da2016-01-07 16:50:45 +02002712 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002713 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002714 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2715 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002716 break;
2717 case ADD_STA_IMMEDIATE_BA_FAILURE:
2718 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2719 ret = -ENOSPC;
2720 break;
2721 default:
2722 ret = -EIO;
2723 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2724 start ? "start" : "stopp", status);
2725 break;
2726 }
2727
Sara Sharon10b2b202016-03-20 16:23:41 +02002728 if (ret)
2729 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002730
Sara Sharon10b2b202016-03-20 16:23:41 +02002731 if (start) {
2732 u8 baid;
2733
2734 mvm->rx_ba_sessions++;
2735
2736 if (!iwl_mvm_has_new_rx_api(mvm))
2737 return 0;
2738
2739 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2740 ret = -EINVAL;
2741 goto out_free;
2742 }
2743 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2744 IWL_ADD_STA_BAID_SHIFT);
2745 baid_data->baid = baid;
2746 baid_data->timeout = timeout;
2747 baid_data->last_rx = jiffies;
Kees Cook8cef5342017-10-24 02:29:37 -07002748 baid_data->rcu_ptr = &mvm->baid_map[baid];
2749 timer_setup(&baid_data->session_timer,
2750 iwl_mvm_rx_agg_session_expired, 0);
Sara Sharon10b2b202016-03-20 16:23:41 +02002751 baid_data->mvm = mvm;
2752 baid_data->tid = tid;
2753 baid_data->sta_id = mvm_sta->sta_id;
2754
2755 mvm_sta->tid_to_baid[tid] = baid;
2756 if (timeout)
2757 mod_timer(&baid_data->session_timer,
2758 TU_TO_EXP_TIME(timeout * 2));
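		/*
		 * Example with assumed values: a BA timeout of 5000 TU
		 * arms the timer roughly 10.24 s out (1 TU = 1024 us,
		 * doubled above), so the driver only expires the session
		 * well after the nominal timeout has passed without RX.
		 */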
2759
Sara Sharon3f1c4c52017-10-02 12:07:59 +03002760 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002761 /*
2762	 * Protect the BA data with RCU to cover a case where our
2763	 * internal RX sync mechanism times out (not that it's
2764	 * supposed to happen) and we would free the session data while
2765	 * RX is still being processed in parallel.
2766 */
Sara Sharon35263a02016-06-21 12:12:10 +03002767 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2768 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002769 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2770 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002771 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002772 u8 baid = mvm_sta->tid_to_baid[tid];
2773
Sara Sharon60dec522016-06-21 14:14:08 +03002774 if (mvm->rx_ba_sessions > 0)
2775 /* check that restart flow didn't zero the counter */
2776 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002777 if (!iwl_mvm_has_new_rx_api(mvm))
2778 return 0;
2779
2780 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2781 return -EINVAL;
2782
2783 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2784 if (WARN_ON(!baid_data))
2785 return -EINVAL;
2786
2787 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002788 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002789 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002790 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2791 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002792 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002793 }
2794 return 0;
2795
2796out_free:
2797 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002798 return ret;
2799}
2800
Liad Kaufman9794c642015-08-19 17:34:28 +03002801int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2802 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002803{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002804 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002805 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002806 int ret;
2807 u32 status;
2808
2809 lockdep_assert_held(&mvm->mutex);
2810
2811 if (start) {
2812 mvm_sta->tfd_queue_msk |= BIT(queue);
2813 mvm_sta->tid_disable_agg &= ~BIT(tid);
2814 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002815 /* In DQA-mode the queue isn't removed on agg termination */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002816 mvm_sta->tid_disable_agg |= BIT(tid);
2817 }
2818
2819 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2820 cmd.sta_id = mvm_sta->sta_id;
2821 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002822 if (!iwl_mvm_has_new_tx_api(mvm))
2823 cmd.modify_mask = STA_MODIFY_QUEUES;
2824 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002825 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2826 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2827
2828 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002829 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2830 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002831 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002832 if (ret)
2833 return ret;
2834
Sara Sharon837c4da2016-01-07 16:50:45 +02002835 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002836 case ADD_STA_SUCCESS:
2837 break;
2838 default:
2839 ret = -EIO;
2840 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2841 start ? "start" : "stopp", status);
2842 break;
2843 }
2844
2845 return ret;
2846}
2847
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002848const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002849 IEEE80211_AC_BE,
2850 IEEE80211_AC_BK,
2851 IEEE80211_AC_BK,
2852 IEEE80211_AC_BE,
2853 IEEE80211_AC_VI,
2854 IEEE80211_AC_VI,
2855 IEEE80211_AC_VO,
2856 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002857 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002858};
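/*
 * Usage sketch: callers index this table by TID, e.g.
 * tid_to_mac80211_ac[5] yields IEEE80211_AC_VI for a video-priority
 * TID, matching the standard 802.11 TID-to-AC mapping.
 */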
2859
Johannes Berg3e56ead2013-02-15 22:23:18 +01002860static const u8 tid_to_ucode_ac[] = {
2861 AC_BE,
2862 AC_BK,
2863 AC_BK,
2864 AC_BE,
2865 AC_VI,
2866 AC_VI,
2867 AC_VO,
2868 AC_VO,
2869};
2870
Johannes Berg8ca151b2013-01-24 14:25:36 +01002871int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2872 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2873{
Johannes Berg5b577a92013-11-14 18:20:04 +01002874 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002875 struct iwl_mvm_tid_data *tid_data;
Liad Kaufmandd321622017-04-05 16:25:11 +03002876 u16 normalized_ssn;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002877 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002878 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002879
2880 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2881 return -EINVAL;
2882
Naftali Goldsteinbd800e42017-08-28 11:51:05 +03002883 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2884 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2885 IWL_ERR(mvm,
2886 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
Johannes Berg8ca151b2013-01-24 14:25:36 +01002887 mvmsta->tid_data[tid].state);
2888 return -ENXIO;
2889 }
2890
2891 lockdep_assert_held(&mvm->mutex);
2892
Liad Kaufmanbd8f3fc2018-01-17 15:25:28 +02002893 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2894 iwl_mvm_has_new_tx_api(mvm)) {
2895 u8 ac = tid_to_mac80211_ac[tid];
2896
2897 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2898 if (ret)
2899 return ret;
2900 }
2901
Arik Nemtsovb2492502014-03-13 12:21:50 +02002902 spin_lock_bh(&mvmsta->lock);
2903
2904 /* possible race condition - we entered D0i3 while starting agg */
2905 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2906 spin_unlock_bh(&mvmsta->lock);
2907 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2908 return -EIO;
2909 }
2910
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002911 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002912
Liad Kaufmancf961e12015-08-13 19:16:08 +03002913 /*
2914 * Note the possible cases:
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002915 * 1. An enabled TXQ - TXQ needs to become agg'ed
2916 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2917 * it as reserved
Liad Kaufmancf961e12015-08-13 19:16:08 +03002918 */
2919 txq_id = mvmsta->tid_data[tid].txq_id;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002920 if (txq_id == IWL_MVM_INVALID_QUEUE) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002921 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
Johannes Bergc8f54702017-06-19 23:50:31 +02002922 IWL_MVM_DQA_MIN_DATA_QUEUE,
2923 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002924 if (txq_id < 0) {
2925 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002926 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2927 goto release_locks;
2928 }
2929
2930 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2931 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002932 } else if (unlikely(mvm->queue_info[txq_id].status ==
2933 IWL_MVM_QUEUE_SHARED)) {
2934 ret = -ENXIO;
2935 IWL_DEBUG_TX_QUEUES(mvm,
2936 "Can't start tid %d agg on shared queue!\n",
2937 tid);
2938 goto release_locks;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002939 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002940
2941 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002942
Liad Kaufmancf961e12015-08-13 19:16:08 +03002943 IWL_DEBUG_TX_QUEUES(mvm,
2944 "AGG for tid %d will be on queue #%d\n",
2945 tid, txq_id);
2946
Johannes Berg8ca151b2013-01-24 14:25:36 +01002947 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002948 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002949 tid_data->txq_id = txq_id;
2950 *ssn = tid_data->ssn;
2951
2952 IWL_DEBUG_TX_QUEUES(mvm,
2953 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2954 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2955 tid_data->next_reclaimed);
2956
Liad Kaufmandd321622017-04-05 16:25:11 +03002957 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002958	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we'll need
Liad Kaufmandd321622017-04-05 16:25:11 +03002959 * to align the wrap around of ssn so we compare relevant values.
2960 */
2961 normalized_ssn = tid_data->ssn;
2962 if (mvm->trans->cfg->gen2)
2963 normalized_ssn &= 0xff;
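	/*
	 * Illustrative example: an ssn of 0x142 is masked down to 0x42
	 * on gen2 HW, so it can match an 8-bit next_reclaimed of 0x42
	 * even though the full 16-bit counters differ by a wrap.
	 */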
2964
2965 if (normalized_ssn == tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002966 tid_data->state = IWL_AGG_STARTING;
2967 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2968 } else {
2969 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2970 }
2971
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002972 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002973 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002974
2975release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002976 spin_unlock(&mvm->queue_info_lock);
2977out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002978 spin_unlock_bh(&mvmsta->lock);
2979
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002980 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002981}
2982
2983int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Luca Coelho514c30692018-06-24 11:59:54 +03002984 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002985 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002986{
Johannes Berg5b577a92013-11-14 18:20:04 +01002987 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002988 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002989 unsigned int wdg_timeout =
2990 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002991 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002992 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002993 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002994 u16 ssn;
2995
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002996 struct iwl_trans_txq_scd_cfg cfg = {
2997 .sta_id = mvmsta->sta_id,
2998 .tid = tid,
2999 .frame_limit = buf_size,
3000 .aggregate = true,
3001 };
3002
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003003 /*
3004 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
3005 * manager, so this function should never be called in this case.
3006 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02003007 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003008 return -EINVAL;
3009
Eyal Shapiraefed6642014-09-14 15:58:53 +03003010 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3011 != IWL_MAX_TID_COUNT);
3012
Johannes Berg8ca151b2013-01-24 14:25:36 +01003013 spin_lock_bh(&mvmsta->lock);
3014 ssn = tid_data->ssn;
3015 queue = tid_data->txq_id;
3016 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03003017 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003018 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02003019 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003020 spin_unlock_bh(&mvmsta->lock);
3021
Sara Sharon34e10862017-02-23 13:15:07 +02003022 if (iwl_mvm_has_new_tx_api(mvm)) {
3023 /*
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003024 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3025 * would have failed, so if we are here there is no need to
3026 * allocate a queue.
3027 * However, if aggregation size is different than the default
3028 * size, the scheduler should be reconfigured.
3029 * We cannot do this with the new TX API, so return unsupported
3030		 * for now, until it is offloaded to firmware.
3031 * Note that if SCD default value changes - this condition
3032 * should be updated as well.
Sara Sharon34e10862017-02-23 13:15:07 +02003033 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003034 if (buf_size < IWL_FRAME_LIMIT)
Sara Sharon34e10862017-02-23 13:15:07 +02003035 return -ENOTSUPP;
3036
3037 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3038 if (ret)
3039 return -EIO;
3040 goto out;
3041 }
3042
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02003043 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01003044
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02003045 spin_lock_bh(&mvm->queue_info_lock);
3046 queue_status = mvm->queue_info[queue].status;
3047 spin_unlock_bh(&mvm->queue_info_lock);
3048
Johannes Bergc8f54702017-06-19 23:50:31 +02003049 /* Maybe there is no need to even alloc a queue... */
3050	if (queue_status == IWL_MVM_QUEUE_READY)
3051 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03003052
Johannes Bergc8f54702017-06-19 23:50:31 +02003053 /*
3054 * Only reconfig the SCD for the queue if the window size has
3055 * changed from current (become smaller)
3056 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003057 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03003058 /*
Johannes Bergc8f54702017-06-19 23:50:31 +02003059 * If reconfiguring an existing queue, it first must be
3060 * drained
Liad Kaufmancf961e12015-08-13 19:16:08 +03003061 */
Johannes Bergc8f54702017-06-19 23:50:31 +02003062 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3063 BIT(queue));
3064 if (ret) {
3065 IWL_ERR(mvm,
3066 "Error draining queue before reconfig\n");
3067 return ret;
3068 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03003069
Johannes Bergc8f54702017-06-19 23:50:31 +02003070 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3071 mvmsta->sta_id, tid,
3072 buf_size, ssn);
3073 if (ret) {
3074 IWL_ERR(mvm,
3075 "Error reconfiguring TXQ #%d\n", queue);
3076 return ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03003077 }
3078 }
3079
3080 if (alloc_queue)
3081 iwl_mvm_enable_txq(mvm, queue,
3082 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
3083 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03003084
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02003085 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3086 if (queue_status != IWL_MVM_QUEUE_SHARED) {
3087 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3088 if (ret)
3089 return -EIO;
3090 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003091
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003092 /* No need to mark as reserved */
3093 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03003094 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003095 spin_unlock_bh(&mvm->queue_info_lock);
3096
Sara Sharon34e10862017-02-23 13:15:07 +02003097out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003098 /*
3099 * Even though in theory the peer could have different
3100 * aggregation reorder buffer sizes for different sessions,
3101 * our ucode doesn't allow for that and has a global limit
3102 * for each station. Therefore, use the minimum of all the
3103 * aggregation sessions and our default value.
3104 */
3105 mvmsta->max_agg_bufsize =
3106 min(mvmsta->max_agg_bufsize, buf_size);
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003107 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003108
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03003109 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3110 sta->addr, tid);
3111
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003112 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003113}
3114
Sara Sharon34e10862017-02-23 13:15:07 +02003115static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3116 struct iwl_mvm_sta *mvmsta,
Avraham Stern4b387902018-03-07 10:41:18 +02003117 struct iwl_mvm_tid_data *tid_data)
Sara Sharon34e10862017-02-23 13:15:07 +02003118{
Avraham Stern4b387902018-03-07 10:41:18 +02003119 u16 txq_id = tid_data->txq_id;
3120
Sara Sharon34e10862017-02-23 13:15:07 +02003121 if (iwl_mvm_has_new_tx_api(mvm))
3122 return;
3123
3124 spin_lock_bh(&mvm->queue_info_lock);
3125 /*
3126	 * The TXQ is marked as reserved only if no traffic came through yet.
3127	 * This means no traffic has been sent on this TID (agg'd or not), so
3128	 * we no longer have use for the queue. Since it hasn't even been
3129	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
3130 * free.
3131 */
Avraham Stern4b387902018-03-07 10:41:18 +02003132 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
Sara Sharon34e10862017-02-23 13:15:07 +02003133 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Avraham Stern4b387902018-03-07 10:41:18 +02003134 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3135 }
Sara Sharon34e10862017-02-23 13:15:07 +02003136
3137 spin_unlock_bh(&mvm->queue_info_lock);
3138}
3139
Johannes Berg8ca151b2013-01-24 14:25:36 +01003140int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3141 struct ieee80211_sta *sta, u16 tid)
3142{
Johannes Berg5b577a92013-11-14 18:20:04 +01003143 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003144 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3145 u16 txq_id;
3146 int err;
3147
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02003148 /*
3149 * If mac80211 is cleaning its state, then say that we finished since
3150 * our state has been cleared anyway.
3151 */
3152 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3153 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3154 return 0;
3155 }
3156
Johannes Berg8ca151b2013-01-24 14:25:36 +01003157 spin_lock_bh(&mvmsta->lock);
3158
3159 txq_id = tid_data->txq_id;
3160
3161 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3162 mvmsta->sta_id, tid, txq_id, tid_data->state);
3163
Eyal Shapiraefed6642014-09-14 15:58:53 +03003164 mvmsta->agg_tids &= ~BIT(tid);
3165
Avraham Stern4b387902018-03-07 10:41:18 +02003166 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003167
Johannes Berg8ca151b2013-01-24 14:25:36 +01003168 switch (tid_data->state) {
3169 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01003170 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003171
3172 IWL_DEBUG_TX_QUEUES(mvm,
3173 "ssn = %d, next_recl = %d\n",
3174 tid_data->ssn, tid_data->next_reclaimed);
3175
Johannes Berg8ca151b2013-01-24 14:25:36 +01003176 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02003177 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02003178 spin_unlock_bh(&mvmsta->lock);
3179
3180 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3181
3182 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
Johannes Bergf7f89e72014-08-05 15:24:44 +02003183 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003184 case IWL_AGG_STARTING:
3185 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3186 /*
3187 * The agg session has been stopped before it was set up. This
3188 * can happen when the AddBA timer times out for example.
3189 */
3190
3191 /* No barriers since we are under mutex */
3192 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003193
3194 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3195 tid_data->state = IWL_AGG_OFF;
3196 err = 0;
3197 break;
3198 default:
3199 IWL_ERR(mvm,
3200 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3201 mvmsta->sta_id, tid, tid_data->state);
3202 IWL_ERR(mvm,
3203 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3204 err = -EINVAL;
3205 }
3206
3207 spin_unlock_bh(&mvmsta->lock);
3208
3209 return err;
3210}
3211
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003212int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3213 struct ieee80211_sta *sta, u16 tid)
3214{
Johannes Berg5b577a92013-11-14 18:20:04 +01003215 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003216 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3217 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02003218 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003219
3220 /*
3221 * First set the agg state to OFF to avoid calling
3222 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3223 */
3224 spin_lock_bh(&mvmsta->lock);
3225 txq_id = tid_data->txq_id;
3226 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3227 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02003228 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003229 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03003230 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003231 spin_unlock_bh(&mvmsta->lock);
3232
Avraham Stern4b387902018-03-07 10:41:18 +02003233 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003234
Johannes Bergb6658ff2013-07-24 13:55:51 +02003235 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02003236 iwl_mvm_drain_sta(mvm, mvmsta, true);
Sara Sharond6d517b2017-03-06 10:16:11 +02003237
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003238 if (iwl_mvm_has_new_tx_api(mvm)) {
3239 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3240 BIT(tid), 0))
3241 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02003242 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003243 } else {
3244 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3245 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02003246 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003247 }
Sara Sharond6d517b2017-03-06 10:16:11 +02003248
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02003249 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003250
Johannes Bergf7f89e72014-08-05 15:24:44 +02003251 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
Johannes Bergb6658ff2013-07-24 13:55:51 +02003252 }
3253
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003254 return 0;
3255}
3256
Johannes Berg8ca151b2013-01-24 14:25:36 +01003257static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3258{
Johannes Berg2dc2a152015-06-16 17:09:18 +02003259 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003260
3261 lockdep_assert_held(&mvm->mutex);
3262
Johannes Berg2dc2a152015-06-16 17:09:18 +02003263 /* Pick the unused key offset with the highest 'deleted'
3264 * counter. Every time a key is deleted, all the counters
3265 * are incremented and the one that was just deleted is
3266 * reset to zero. Thus, the highest counter is the one
3267 * that was deleted longest ago. Pick that one.
3268 */
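	/*
	 * Worked example with hypothetical state: if offsets 0 and 2 are
	 * set in fw_key_table and fw_key_deleted[] holds {-, 5, -, 1, 0,
	 * ...}, then offsets 1, 3 and 4 are candidates and offset 1 wins
	 * with the highest 'deleted' count (5), i.e. the slot that has
	 * been unused the longest.
	 */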
3269 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3270 if (test_bit(i, mvm->fw_key_table))
3271 continue;
3272 if (mvm->fw_key_deleted[i] > max) {
3273 max = mvm->fw_key_deleted[i];
3274 max_offs = i;
3275 }
3276 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003277
Johannes Berg2dc2a152015-06-16 17:09:18 +02003278 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003279 return STA_KEY_IDX_INVALID;
3280
Johannes Berg2dc2a152015-06-16 17:09:18 +02003281 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003282}
3283
Johannes Berg5f7a1842015-12-11 09:36:10 +01003284static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3285 struct ieee80211_vif *vif,
3286 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003287{
Johannes Berg5b530e92014-12-23 16:00:17 +01003288 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003289
Johannes Berg5f7a1842015-12-11 09:36:10 +01003290 if (sta)
3291 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003292
3293 /*
3294 * The device expects GTKs for station interfaces to be
3295 * installed as GTKs for the AP station. If we have no
3296 * station ID, then use AP's station ID.
3297 */
3298 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003299 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Avri Altman9513c5e2015-10-19 16:29:11 +02003300 u8 sta_id = mvmvif->ap_sta_id;
3301
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003302 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3303 lockdep_is_held(&mvm->mutex));
3304
Avri Altman9513c5e2015-10-19 16:29:11 +02003305 /*
3306 * It is possible that the 'sta' parameter is NULL,
3307 * for example when a GTK is removed - the sta_id will then
3308 * be the AP ID, and no station was passed by mac80211.
3309 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003310 if (IS_ERR_OR_NULL(sta))
3311 return NULL;
3312
3313 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02003314 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003315
Johannes Berg5f7a1842015-12-11 09:36:10 +01003316 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003317}
3318
3319static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
David Spinadel85aeb582017-03-30 19:43:53 +03003320 u32 sta_id,
Sara Sharon45c458b2016-11-09 15:43:26 +02003321 struct ieee80211_key_conf *key, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003322 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003323 u8 key_offset, bool mfp)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003324{
Sara Sharon45c458b2016-11-09 15:43:26 +02003325 union {
3326 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3327 struct iwl_mvm_add_sta_key_cmd cmd;
3328 } u = {};
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003329 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01003330 int ret;
3331 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003332 u16 keyidx;
Sara Sharon45c458b2016-11-09 15:43:26 +02003333 u64 pn = 0;
3334 int i, size;
3335 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3336 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003337
David Spinadel85aeb582017-03-30 19:43:53 +03003338 if (sta_id == IWL_MVM_INVALID_STA)
3339 return -EINVAL;
3340
Sara Sharon45c458b2016-11-09 15:43:26 +02003341 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
Johannes Berg8ca151b2013-01-24 14:25:36 +01003342 STA_KEY_FLG_KEYID_MSK;
3343 key_flags = cpu_to_le16(keyidx);
3344 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3345
Sara Sharon45c458b2016-11-09 15:43:26 +02003346 switch (key->cipher) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003347 case WLAN_CIPHER_SUITE_TKIP:
3348 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003349 if (new_api) {
3350 memcpy((void *)&u.cmd.tx_mic_key,
3351 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3352 IWL_MIC_KEY_SIZE);
3353
3354 memcpy((void *)&u.cmd.rx_mic_key,
3355 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3356 IWL_MIC_KEY_SIZE);
3357 pn = atomic64_read(&key->tx_pn);
3358
3359 } else {
3360 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3361 for (i = 0; i < 5; i++)
3362 u.cmd_v1.tkip_rx_ttak[i] =
3363 cpu_to_le16(tkip_p1k[i]);
3364 }
3365 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003366 break;
3367 case WLAN_CIPHER_SUITE_CCMP:
3368 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Sara Sharon45c458b2016-11-09 15:43:26 +02003369 memcpy(u.cmd.common.key, key->key, key->keylen);
3370 if (new_api)
3371 pn = atomic64_read(&key->tx_pn);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003372 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01003373 case WLAN_CIPHER_SUITE_WEP104:
3374 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05003375 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01003376 case WLAN_CIPHER_SUITE_WEP40:
3377 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003378 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
Johannes Bergba3943b2014-11-12 23:54:48 +01003379 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03003380 case WLAN_CIPHER_SUITE_GCMP_256:
3381 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3382 /* fall through */
3383 case WLAN_CIPHER_SUITE_GCMP:
3384 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003385 memcpy(u.cmd.common.key, key->key, key->keylen);
3386 if (new_api)
3387 pn = atomic64_read(&key->tx_pn);
Ayala Beker2a53d162016-04-07 16:21:57 +03003388 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003389 default:
Max Stepanove36e5432013-08-27 19:56:13 +03003390 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
Sara Sharon45c458b2016-11-09 15:43:26 +02003391 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003392 }
3393
Johannes Bergba3943b2014-11-12 23:54:48 +01003394 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003395 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003396 if (mfp)
3397 key_flags |= cpu_to_le16(STA_KEY_MFP);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003398
Sara Sharon45c458b2016-11-09 15:43:26 +02003399 u.cmd.common.key_offset = key_offset;
3400 u.cmd.common.key_flags = key_flags;
David Spinadel85aeb582017-03-30 19:43:53 +03003401 u.cmd.common.sta_id = sta_id;
Sara Sharon45c458b2016-11-09 15:43:26 +02003402
3403 if (new_api) {
3404 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3405 size = sizeof(u.cmd);
3406 } else {
3407 size = sizeof(u.cmd_v1);
3408 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003409
3410 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003411 if (cmd_flags & CMD_ASYNC)
Sara Sharon45c458b2016-11-09 15:43:26 +02003412 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3413 &u.cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003414 else
Sara Sharon45c458b2016-11-09 15:43:26 +02003415 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3416 &u.cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003417
3418 switch (status) {
3419 case ADD_STA_SUCCESS:
3420 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3421 break;
3422 default:
3423 ret = -EIO;
3424 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3425 break;
3426 }
3427
3428 return ret;
3429}
3430
3431static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3432 struct ieee80211_key_conf *keyconf,
3433 u8 sta_id, bool remove_key)
3434{
3435 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3436
3437 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03003438 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3439 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3440 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3441 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3442 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3443 return -EINVAL;
3444
3445 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3446 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01003447 return -EINVAL;
3448
3449 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3450 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3451
3452 if (remove_key) {
3453 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3454 } else {
3455 struct ieee80211_key_seq seq;
3456 const u8 *pn;
3457
Ayala Bekeraa950522016-06-01 00:28:09 +03003458 switch (keyconf->cipher) {
3459 case WLAN_CIPHER_SUITE_AES_CMAC:
3460 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3461 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03003462 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3463 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3464 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3465 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03003466 default:
3467 return -EINVAL;
3468 }
3469
Ayala Beker8e160ab2016-04-11 11:37:38 +03003470 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3471 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3472 igtk_cmd.ctrl_flags |=
3473 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003474 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3475 pn = seq.aes_cmac.pn;
3476 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3477 ((u64) pn[4] << 8) |
3478 ((u64) pn[3] << 16) |
3479 ((u64) pn[2] << 24) |
3480 ((u64) pn[1] << 32) |
3481 ((u64) pn[0] << 40));
3482 }
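	/*
	 * Illustrative example: a PN of {0x01, 0x02, 0x03, 0x04, 0x05,
	 * 0x06} (pn[0] most significant, as the shifts above assume)
	 * packs into the u64 0x010203040506 before cpu_to_le64().
	 */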
3483
3484 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3485 remove_key ? "removing" : "installing",
3486 igtk_cmd.sta_id);
3487
Ayala Beker8e160ab2016-04-11 11:37:38 +03003488 if (!iwl_mvm_has_new_rx_api(mvm)) {
3489 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3490 .ctrl_flags = igtk_cmd.ctrl_flags,
3491 .key_id = igtk_cmd.key_id,
3492 .sta_id = igtk_cmd.sta_id,
3493 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3494 };
3495
3496 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3497 ARRAY_SIZE(igtk_cmd_v1.igtk));
3498 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3499 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3500 }
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003501 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003502 sizeof(igtk_cmd), &igtk_cmd);
3503}
3504
3505
3506static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3507 struct ieee80211_vif *vif,
3508 struct ieee80211_sta *sta)
3509{
Johannes Berg5b530e92014-12-23 16:00:17 +01003510 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003511
3512 if (sta)
3513 return sta->addr;
3514
3515 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003516 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003517 u8 sta_id = mvmvif->ap_sta_id;
3518 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3519 lockdep_is_held(&mvm->mutex));
3520 return sta->addr;
3521 }
3522
3523
3524 return NULL;
3525}
3526
Johannes Berg2f6319d2014-11-12 23:39:56 +01003527static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3528 struct ieee80211_vif *vif,
3529 struct ieee80211_sta *sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01003530 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003531 u8 key_offset,
Johannes Bergba3943b2014-11-12 23:54:48 +01003532 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003533{
Johannes Berg8ca151b2013-01-24 14:25:36 +01003534 int ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003535 const u8 *addr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003536 struct ieee80211_key_seq seq;
3537 u16 p1k[5];
David Spinadel85aeb582017-03-30 19:43:53 +03003538 u32 sta_id;
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003539 bool mfp = false;
David Spinadel85aeb582017-03-30 19:43:53 +03003540
3541 if (sta) {
3542 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3543
3544 sta_id = mvm_sta->sta_id;
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003545 mfp = sta->mfp;
David Spinadel85aeb582017-03-30 19:43:53 +03003546 } else if (vif->type == NL80211_IFTYPE_AP &&
3547 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3548 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3549
3550 sta_id = mvmvif->mcast_sta.sta_id;
3551 } else {
3552 IWL_ERR(mvm, "Failed to find station id\n");
3553 return -EINVAL;
3554 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003555
Johannes Berg8ca151b2013-01-24 14:25:36 +01003556 switch (keyconf->cipher) {
3557 case WLAN_CIPHER_SUITE_TKIP:
3558 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3559 /* get phase 1 key from mac80211 */
3560 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3561 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
David Spinadel85aeb582017-03-30 19:43:53 +03003562 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003563 seq.tkip.iv32, p1k, 0, key_offset,
3564 mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003565 break;
3566 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergba3943b2014-11-12 23:54:48 +01003567 case WLAN_CIPHER_SUITE_WEP40:
3568 case WLAN_CIPHER_SUITE_WEP104:
Ayala Beker2a53d162016-04-07 16:21:57 +03003569 case WLAN_CIPHER_SUITE_GCMP:
3570 case WLAN_CIPHER_SUITE_GCMP_256:
David Spinadel85aeb582017-03-30 19:43:53 +03003571 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003572 0, NULL, 0, key_offset, mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003573 break;
3574 default:
David Spinadel85aeb582017-03-30 19:43:53 +03003575 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003576 0, NULL, 0, key_offset, mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003577 }
3578
Johannes Berg8ca151b2013-01-24 14:25:36 +01003579 return ret;
3580}
3581
Johannes Berg2f6319d2014-11-12 23:39:56 +01003582static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
Johannes Bergba3943b2014-11-12 23:54:48 +01003583 struct ieee80211_key_conf *keyconf,
3584 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003585{
Sara Sharon45c458b2016-11-09 15:43:26 +02003586 union {
3587 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3588 struct iwl_mvm_add_sta_key_cmd cmd;
3589 } u = {};
3590 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3591 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003592 __le16 key_flags;
Sara Sharon45c458b2016-11-09 15:43:26 +02003593 int ret, size;
Johannes Berg79920742014-11-03 15:43:04 +01003594 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003595
Sara Sharone4f13ad2018-01-15 13:50:59 +02003596 /* This is a valid situation for GTK removal */
David Spinadel85aeb582017-03-30 19:43:53 +03003597 if (sta_id == IWL_MVM_INVALID_STA)
Sara Sharone4f13ad2018-01-15 13:50:59 +02003598 return 0;
David Spinadel85aeb582017-03-30 19:43:53 +03003599
Emmanuel Grumbach8115efb2013-02-05 10:08:35 +02003600 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3601 STA_KEY_FLG_KEYID_MSK);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003602 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3603 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3604
Johannes Bergba3943b2014-11-12 23:54:48 +01003605 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003606 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3607
Sara Sharon45c458b2016-11-09 15:43:26 +02003608 /*
3609 * The fields assigned here are in the same location at the start
3610 * of the command, so we can do this union trick.
3611 */
3612 u.cmd.common.key_flags = key_flags;
3613 u.cmd.common.key_offset = keyconf->hw_key_idx;
3614 u.cmd.common.sta_id = sta_id;
3615
3616 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003617
Johannes Berg8ca151b2013-01-24 14:25:36 +01003618 status = ADD_STA_SUCCESS;
Sara Sharon45c458b2016-11-09 15:43:26 +02003619 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3620 &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003621
3622 switch (status) {
3623 case ADD_STA_SUCCESS:
3624 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3625 break;
3626 default:
3627 ret = -EIO;
3628 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3629 break;
3630 }
3631
3632 return ret;
3633}
3634
Johannes Berg2f6319d2014-11-12 23:39:56 +01003635int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3636 struct ieee80211_vif *vif,
3637 struct ieee80211_sta *sta,
3638 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003639 u8 key_offset)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003640{
Johannes Bergba3943b2014-11-12 23:54:48 +01003641 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003642 struct iwl_mvm_sta *mvm_sta;
David Spinadel85aeb582017-03-30 19:43:53 +03003643 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003644 int ret;
Matti Gottlieb11828db2015-06-01 15:15:11 +03003645 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
Johannes Berg2f6319d2014-11-12 23:39:56 +01003646
3647 lockdep_assert_held(&mvm->mutex);
3648
David Spinadel85aeb582017-03-30 19:43:53 +03003649 if (vif->type != NL80211_IFTYPE_AP ||
3650 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3651 /* Get the station id from the mvm local station table */
3652 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3653 if (!mvm_sta) {
3654 IWL_ERR(mvm, "Failed to find station\n");
Johannes Berg2f6319d2014-11-12 23:39:56 +01003655 return -EINVAL;
3656 }
David Spinadel85aeb582017-03-30 19:43:53 +03003657 sta_id = mvm_sta->sta_id;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003658
David Spinadel85aeb582017-03-30 19:43:53 +03003659 /*
3660 * It is possible that the 'sta' parameter is NULL, and thus
Beni Leve829b172018-02-20 13:41:54 +02003661 * there is a need to retrieve the sta from the local station
David Spinadel85aeb582017-03-30 19:43:53 +03003662 * table.
3663 */
3664 if (!sta) {
3665 sta = rcu_dereference_protected(
3666 mvm->fw_id_to_mac_id[sta_id],
3667 lockdep_is_held(&mvm->mutex));
3668 if (IS_ERR_OR_NULL(sta)) {
3669 IWL_ERR(mvm, "Invalid station id\n");
3670 return -EINVAL;
3671 }
3672 }
3673
3674 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3675 return -EINVAL;
Beni Leve829b172018-02-20 13:41:54 +02003676 } else {
3677 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3678
3679 sta_id = mvmvif->mcast_sta.sta_id;
3680 }
3681
3682 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3683 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3684 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3685 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3686 goto end;
David Spinadel85aeb582017-03-30 19:43:53 +03003687 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003688
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003689 /* If the key_offset is not pre-assigned, we need to find a
3690 * new offset to use. In normal cases, the offset is not
3691 * pre-assigned, but during HW_RESTART we want to reuse the
3692 * same indices, so we pass them when this function is called.
3693 *
3694	 * In D3 entry, we need to hardcode the indices (because the
3695 * firmware hardcodes the PTK offset to 0). In this case, we
3696 * need to make sure we don't overwrite the hw_key_idx in the
3697 * keyconf structure, because otherwise we cannot configure
3698 * the original ones back when resuming.
3699 */
3700 if (key_offset == STA_KEY_IDX_INVALID) {
3701 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3702 if (key_offset == STA_KEY_IDX_INVALID)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003703 return -ENOSPC;
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003704 keyconf->hw_key_idx = key_offset;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003705 }
3706
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003707 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003708 if (ret)
Johannes Bergba3943b2014-11-12 23:54:48 +01003709 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003710
3711 /*
3712 * For WEP, the same key is used for multicast and unicast. Upload it
3713 * again, using the same key offset, and now pointing the other one
3714 * to the same key slot (offset).
3715 * If this fails, remove the original as well.
3716 */
David Spinadel85aeb582017-03-30 19:43:53 +03003717 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3718 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3719 sta) {
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003720 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3721 key_offset, !mcast);
Johannes Bergba3943b2014-11-12 23:54:48 +01003722 if (ret) {
Johannes Bergba3943b2014-11-12 23:54:48 +01003723 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003724 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003725 }
3726 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003727
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003728 __set_bit(key_offset, mvm->fw_key_table);
3729
Johannes Berg2f6319d2014-11-12 23:39:56 +01003730end:
3731 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3732 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
Matti Gottlieb11828db2015-06-01 15:15:11 +03003733 sta ? sta->addr : zero_addr, ret);
Johannes Berg2f6319d2014-11-12 23:39:56 +01003734 return ret;
3735}
3736
3737int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3738 struct ieee80211_vif *vif,
3739 struct ieee80211_sta *sta,
3740 struct ieee80211_key_conf *keyconf)
3741{
Johannes Bergba3943b2014-11-12 23:54:48 +01003742 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003743 struct iwl_mvm_sta *mvm_sta;
Sara Sharon0ae98812017-01-04 14:53:58 +02003744 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2dc2a152015-06-16 17:09:18 +02003745 int ret, i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003746
3747 lockdep_assert_held(&mvm->mutex);
3748
Johannes Berg5f7a1842015-12-11 09:36:10 +01003749 /* Get the station from the mvm local station table */
3750 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
Luca Coelho71793b7d2017-03-30 12:04:47 +03003751 if (mvm_sta)
3752 sta_id = mvm_sta->sta_id;
David Spinadel85aeb582017-03-30 19:43:53 +03003753 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3754 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3755
Johannes Berg2f6319d2014-11-12 23:39:56 +01003756
3757 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3758 keyconf->keyidx, sta_id);
3759
Luca Coelho71793b7d2017-03-30 12:04:47 +03003760 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3761 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3762 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
Johannes Berg2f6319d2014-11-12 23:39:56 +01003763 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3764
3765 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3766 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3767 keyconf->hw_key_idx);
3768 return -ENOENT;
3769 }
3770
Johannes Berg2dc2a152015-06-16 17:09:18 +02003771 /* track which key was deleted last */
3772 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3773 if (mvm->fw_key_deleted[i] < U8_MAX)
3774 mvm->fw_key_deleted[i]++;
3775 }
3776 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3777
David Spinadel85aeb582017-03-30 19:43:53 +03003778 if (sta && !mvm_sta) {
Johannes Berg2f6319d2014-11-12 23:39:56 +01003779 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3780 return 0;
3781 }
3782
Johannes Bergba3943b2014-11-12 23:54:48 +01003783 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3784 if (ret)
3785 return ret;
3786
3787 /* delete WEP key twice to get rid of (now useless) offset */
3788 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3789 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3790 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3791
3792 return ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003793}
3794
Johannes Berg8ca151b2013-01-24 14:25:36 +01003795void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3796 struct ieee80211_vif *vif,
3797 struct ieee80211_key_conf *keyconf,
3798 struct ieee80211_sta *sta, u32 iv32,
3799 u16 *phase1key)
3800{
Beni Levc3eb5362013-02-06 17:22:18 +02003801 struct iwl_mvm_sta *mvm_sta;
Johannes Bergba3943b2014-11-12 23:54:48 +01003802 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003803 bool mfp = sta ? sta->mfp : false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003804
Beni Levc3eb5362013-02-06 17:22:18 +02003805 rcu_read_lock();
3806
Johannes Berg5f7a1842015-12-11 09:36:10 +01003807 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3808 if (WARN_ON_ONCE(!mvm_sta))
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003809 goto unlock;
David Spinadel85aeb582017-03-30 19:43:53 +03003810 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003811 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3812 mfp);
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003813
3814 unlock:
Beni Levc3eb5362013-02-06 17:22:18 +02003815 rcu_read_unlock();
Johannes Berg8ca151b2013-01-24 14:25:36 +01003816}
3817
Johannes Berg9cc40712013-02-15 22:47:48 +01003818void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3819 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003820{
Johannes Berg5b577a92013-11-14 18:20:04 +01003821 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003822 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003823 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003824 .sta_id = mvmsta->sta_id,
Emmanuel Grumbach5af01772013-06-09 12:59:24 +03003825 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
Johannes Berg9cc40712013-02-15 22:47:48 +01003826 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003827 };
3828 int ret;
3829
Sara Sharon854c5702016-01-26 13:17:47 +02003830 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3831 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003832 if (ret)
3833 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3834}
3835
Johannes Berg9cc40712013-02-15 22:47:48 +01003836void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3837 struct ieee80211_sta *sta,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003838 enum ieee80211_frame_release_type reason,
Johannes Berg3e56ead2013-02-15 22:23:18 +01003839 u16 cnt, u16 tids, bool more_data,
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003840 bool single_sta_queue)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003841{
Johannes Berg5b577a92013-11-14 18:20:04 +01003842 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003843 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003844 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003845 .sta_id = mvmsta->sta_id,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003846 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3847 .sleep_tx_count = cpu_to_le16(cnt),
Johannes Berg9cc40712013-02-15 22:47:48 +01003848 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003849 };
Johannes Berg3e56ead2013-02-15 22:23:18 +01003850 int tid, ret;
3851 unsigned long _tids = tids;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003852
Johannes Berg3e56ead2013-02-15 22:23:18 +01003853 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3854 * Note that this field is reserved and unused by firmware not
3855 * supporting GO uAPSD, so it's safe to always do this.
3856 */
3857 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3858 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
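	/*
	 * Example with an assumed input: tids == BIT(0) | BIT(5) sets
	 * awake_acs to BIT(AC_BE) | BIT(AC_VI) via the table above.
	 */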
3859
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003860	/* If we're releasing frames from aggregation or dqa queues, then check
3861 * if all the queues that we're releasing frames from, combined, have:
Johannes Berg3e56ead2013-02-15 22:23:18 +01003862 * - more frames than the service period, in which case more_data
3863 * needs to be set
3864 * - fewer than 'cnt' frames, in which case we need to adjust the
3865 * firmware command (but do that unconditionally)
3866 */
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003867 if (single_sta_queue) {
Johannes Berg3e56ead2013-02-15 22:23:18 +01003868 int remaining = cnt;
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003869 int sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003870
3871 spin_lock_bh(&mvmsta->lock);
3872 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3873 struct iwl_mvm_tid_data *tid_data;
3874 u16 n_queued;
3875
3876 tid_data = &mvmsta->tid_data[tid];
Johannes Berg3e56ead2013-02-15 22:23:18 +01003877
Liad Kaufmandd321622017-04-05 16:25:11 +03003878 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003879 if (n_queued > remaining) {
3880 more_data = true;
3881 remaining = 0;
3882 break;
3883 }
3884 remaining -= n_queued;
3885 }
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003886 sleep_tx_count = cnt - remaining;
3887 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3888 mvmsta->sleep_tx_count = sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003889 spin_unlock_bh(&mvmsta->lock);
3890
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003891 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003892 if (WARN_ON(cnt - remaining == 0)) {
3893 ieee80211_sta_eosp(sta);
3894 return;
3895 }
3896 }
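	/*
	 * Illustrative example: cnt == 4 with two TIDs queueing 2 and 5
	 * frames leaves remaining == 2 after the first TID; the second
	 * TID's 5 queued frames exceed that, so more_data is set,
	 * remaining drops to 0 and sleep_tx_count is sent as 4.
	 */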
3897
3898 /* Note: this is ignored by firmware not supporting GO uAPSD */
3899 if (more_data)
Sara Sharonced19f22017-02-06 19:09:32 +02003900 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003901
3902 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3903 mvmsta->next_status_eosp = true;
Sara Sharonced19f22017-02-06 19:09:32 +02003904 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003905 } else {
Sara Sharonced19f22017-02-06 19:09:32 +02003906 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003907 }
3908
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +02003909	/* block the Tx queues until the FW has updated the sleep Tx count */
3910 iwl_trans_block_txq_ptrs(mvm->trans, true);
3911
3912 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3913 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
Sara Sharon854c5702016-01-26 13:17:47 +02003914 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003915 if (ret)
3916 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3917}
Johannes Berg3e56ead2013-02-15 22:23:18 +01003918
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

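/*
 * Toggle STA_FLG_DISABLE_TX for a single station via an ADD_STA modify
 * command, telling the firmware to stop or resume transmitting to it.
 * The command is sent with CMD_ASYNC, so the caller does not block on
 * the firmware response.
 */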
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

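/*
 * Block or unblock TX to one client of our AP/GO interface: track the
 * new state under the station lock, have mac80211 buffer (or release)
 * the station's frames, and mirror the change to the firmware.
 */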
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

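/*
 * Counterpart of iwl_mvm_sta_modify_disable_tx() for internal stations
 * (e.g. the broadcast and multicast stations), which are identified to
 * the firmware by the vif's command id/color rather than through a
 * mac80211 station. Note that this variant sends the command
 * synchronously.
 */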
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

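/*
 * Block or unblock TX for every station that belongs to the given vif,
 * including (on firmware with IWL_UCODE_TLV_API_STA_TYPE) the internal
 * multicast and broadcast stations. A sketch of a typical call site
 * (hypothetical, not taken from this file) around a quiet period:
 *
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
 *	(wait for the quiet period to end)
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
 */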
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

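/*
 * Channel-switch helper: block TX to the AP station of this vif while
 * the client is considered absent. TX is presumably re-enabled by a
 * later disable_tx(false) call once the switch has completed.
 */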
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

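/*
 * Return the number of frames still queued on a TID, computed as the
 * sequence-number distance between the driver's next sequence number
 * and the firmware's next_reclaimed index.
 */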
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so
	 * we need to align the wrap-around of the SSN before comparing
	 * the relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}