/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);

/*
 * The new version of the ADD_STA command added new fields at the end of
 * the structure, so sending the size of the relevant API's structure is
 * enough to support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

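/* Find a free fw station ID to use for a new station */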
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

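/*
 * Timer callback for a RX BA session: if frames were received within
 * twice the session timeout, re-arm the timer for the time remaining;
 * otherwise let mac80211 know that the session timed out.
 */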
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

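/*
 * Disable a TXQ and drop its TID/mac80211-queue mappings. On the new TX
 * path the queue is simply freed in the transport; on the DQA path the
 * queue is only torn down in the SCD once no TIDs remain mapped to it.
 */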
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
	int ret;

	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
		return -EINVAL;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);

		if (remove_mac_queue)
			mvm->hw_queue_to_mac80211[queue] &=
				~BIT(mac80211_queue);

		spin_unlock_bh(&mvm->queue_info_lock);

		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC queue
	 * from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->hw_queue_to_mac80211[queue] &=
			~BIT(mac80211_queue);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap ||
	     mvm->hw_queue_to_mac80211[queue],
	     "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
	     queue, mvm->hw_queue_to_mac80211[queue],
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->hw_queue_to_mac80211[queue] = 0;

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

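/* Return a bitmap of the TIDs that have an active BA session on this queue */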
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

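/*
 * Free a queue that was marked as inactive: unmap it from its station,
 * disable any aggregations still running on it and disable the queue.
 * If the queue now serves another station, also notify the fw that it
 * was removed from the previous one.
 */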
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

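/*
 * Pick one of the station's existing DATA queues to be shared with a new
 * TID of access category @ac, in the priority order documented below.
 */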
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try to take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise, if no redirection is required, it does
 * nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

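/* Find a HW queue that is free in the [minq, maxq] range */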
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

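/*
 * Allocate a queue in the transport (new TX path / TVQM only) for the
 * given station/TID and record its mac80211 queue mapping.
 */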
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = IWL_DEFAULT_QUEUE_SIZE;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = IWL_MGMT_QUEUE_SIZE;
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
			    queue, mvm->hw_queue_to_mac80211[queue]);

	return queue;
}

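/* Allocate a TVQM queue for a station/TID and store it in the TID data */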
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

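/*
 * Record the queue <-> (mac80211 queue, sta, tid) mapping. Returns true
 * if this is the first TID on the queue, i.e. the caller still needs to
 * actually enable the queue.
 */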
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
				       int mac80211_queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
		WARN(mac80211_queue >=
		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
		     mac80211_queue, queue, sta_id, tid);
		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	}

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue, mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	spin_unlock_bh(&mvm->queue_info_lock);

	return enable_queue;
}

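/*
 * Enable a queue in the SCD (pre-22000 TX path only). Returns true if the
 * queue had to be enabled at SSN + 1, in which case the caller must bump
 * its own sequence counters accordingly.
 */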
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
					cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

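/*
 * Allocate a TX queue for a station/TID: prefer a free queue (or the
 * station's reserved one), reuse an inactive queue if needed, and fall
 * back to sharing an existing DATA queue as a last resort.
 */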
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

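/*
 * Update the fw's TID "owner" of a queue after the owning TID was
 * removed, by picking any TID still mapped to the queue.
 */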
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

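/*
 * Turn a shared queue that is left with a single TID back into a regular
 * queue: redirect it to the TID's own AC, and re-enable aggregation if a
 * BA session is active on that TID.
 */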
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 */
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - mark queue as inactive. */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;

		for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
			mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
				    queue);
		return;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
		mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvm->hw_queue_to_mac80211[queue] |=
			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
	}

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
	}
}

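/*
 * Go over the queues and mark TIDs (or whole queues) as inactive if no
 * frame was recently sent on them, so that they can later be reused or
 * unshared.
 */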
static void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
{
	unsigned long now = jiffies;
	int i;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	/*
	 * If a queue times out - mark it as INACTIVE (don't remove right away
	 * if we don't have to.) This is an optimization in case traffic comes
	 * later, and we don't HAVE to use a currently-inactive queue
	 */
	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		/* this isn't so nice, but works OK due to the way we loop */
		spin_unlock(&mvm->queue_info_lock);

		/* and we need this locking order */
		spin_lock(&mvmsta->lock);
		spin_lock(&mvm->queue_info_lock);
		iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
					     inactive_tid_bitmap);
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock(&mvmsta->lock);
	}

	rcu_read_unlock();
	spin_unlock_bh(&mvm->queue_info_lock);
}

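/*
 * In iwl_mvm_inactivity_check() above, a TID counts as inactive once
 * roughly
 *
 *	jiffies >= last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT
 *
 * holds (with wraparound handled by time_after()).
 */
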
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

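/*
 * iwl_mvm_tid_to_ac_queue() relies on the standard 802.11 TID to AC
 * mapping, e.g. TID 0 maps to IEEE80211_AC_BE and TID 7 to
 * IEEE80211_AC_VO, while the special IWL_MAX_TID_COUNT value used for
 * management frames is pinned to IEEE80211_AC_VO.
 */
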
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

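/*
 * Note that when queue allocation fails in iwl_mvm_tx_deferred_stream()
 * the deferred frames are not re-queued: they are dequeued and freed
 * with ieee80211_free_txskb(), and only then is the mac queue woken.
 */
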
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

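/*
 * The worker above thus does three things in order: reclaim timed-out
 * queues, reconfigure shared queues that need unsharing or a new owner
 * (pre-TVQM only), and finally flush out any traffic that was deferred
 * while a station had no allocated queue.
 */
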
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

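/*
 * The reservation policy above in short: a non-TDLS client on a station
 * interface gets the dedicated IWL_MVM_DQA_BSS_CLIENT_QUEUE when that
 * queue is completely free; everyone else competes for a queue in the
 * [IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE] range.
 */
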
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

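/*
 * The TVQM branch above deliberately resets tid_data->seq_number: after
 * a firmware reset the hardware starts assigning sequence numbers from
 * zero again, so the driver-side counter must match. The pre-TVQM
 * branch instead re-programs the old sequence number into the queue.
 */
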
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

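/*
 * iwl_mvm_add_int_sta_common() serves all the internal stations in this
 * file: e.g. the aux station is added with a NULL address and
 * MAC_INDEX_AUX, the sniffer station with the vif's own address, and
 * the broadcast station with the BSSID (or an all-ones address).
 */
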
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

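/*
 * Internal stations pass the !sta check in iwl_mvm_rm_sta_common()
 * because they are stored in fw_id_to_mac_id as ERR_PTR(-EINVAL) rather
 * than NULL (see iwl_mvm_allocate_int_sta() below), so they can be
 * removed through the same path.
 */
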
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

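/*
 * Typical usage of the helpers above, as in iwl_mvm_add_aux_sta()
 * below: allocate the internal station, enable its queue, send ADD_STA
 * via iwl_mvm_add_int_sta_common(), and undo the allocation with
 * iwl_mvm_dealloc_int_sta() if the command fails.
 */
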
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
	}
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
			    IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC) {
		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
		mvmvif->cab_queue = vif->cab_queue;
	}

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    0,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);

	if (mvmvif->ap_wep_key) {
		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);

		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;

		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
					   mvmvif->ap_wep_key, 1, 0, NULL, 0,
					   key_offset, 0);
		if (ret)
			return ret;
	}

	return 0;
}

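/*
 * The ap_wep_key block above covers AP interfaces protected by shared
 * WEP: the group key is programmed to the firmware for the multicast
 * station as well, so that group-addressed frames can be encrypted.
 */
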
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}

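/*
 * Reorder-buffer layout, matching the sizing done in
 * iwl_mvm_sta_rx_agg() below: one iwl_mvm_reorder_buffer per RX queue,
 * backed by a flat entries[] array in which queue i owns the slice
 *
 *	entries[i * entries_per_queue] ..
 *	entries[i * entries_per_queue + buf_size - 1]
 *
 * with entries_per_queue rounded up so that each queue's slice starts
 * on its own cache line.
 */
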
Johannes Berg8ca151b2013-01-24 14:25:36 +01002633int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Luca Coelho514c30692018-06-24 11:59:54 +03002634 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002635{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002636 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002637 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002638 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002639 int ret;
2640 u32 status;
2641
2642 lockdep_assert_held(&mvm->mutex);
2643
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002644 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2645 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2646 return -ENOSPC;
2647 }
2648
Sara Sharon10b2b202016-03-20 16:23:41 +02002649 if (iwl_mvm_has_new_rx_api(mvm) && start) {
Johannes Bergdfdddd92017-09-26 12:24:51 +02002650 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2651
2652 /* sparse doesn't like the __align() so don't check */
2653#ifndef __CHECKER__
2654 /*
2655 * The division below will be OK if either the cache line size
2656 * can be divided by the entry size (ALIGN will round up) or if
2657 * if the entry size can be divided by the cache line size, in
2658 * which case the ALIGN() will do nothing.
2659 */
2660 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2661 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2662#endif
2663
2664 /*
2665 * Upward align the reorder buffer size to fill an entire cache
2666 * line for each queue, to avoid sharing cache lines between
2667 * different queues.
2668 */
2669 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2670
Sara Sharon10b2b202016-03-20 16:23:41 +02002671 /*
2672 * Allocate here so if allocation fails we can bail out early
2673 * before starting the BA session in the firmware
2674 */
Sara Sharonb915c102016-03-23 16:32:02 +02002675 baid_data = kzalloc(sizeof(*baid_data) +
2676 mvm->trans->num_rx_queues *
Johannes Bergdfdddd92017-09-26 12:24:51 +02002677 reorder_buf_size,
Sara Sharonb915c102016-03-23 16:32:02 +02002678 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002679 if (!baid_data)
2680 return -ENOMEM;
Johannes Bergdfdddd92017-09-26 12:24:51 +02002681
2682 /*
2683 * This division is why we need the above BUILD_BUG_ON(),
2684 * if that doesn't hold then this will not be right.
2685 */
2686 baid_data->entries_per_queue =
2687 reorder_buf_size / sizeof(baid_data->entries[0]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002688 }
2689
Johannes Berg8ca151b2013-01-24 14:25:36 +01002690 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2691 cmd.sta_id = mvm_sta->sta_id;
2692 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002693 if (start) {
2694 cmd.add_immediate_ba_tid = (u8) tid;
2695 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Luca Coelho514c30692018-06-24 11:59:54 +03002696 cmd.rx_ba_window = cpu_to_le16(buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002697 } else {
2698 cmd.remove_immediate_ba_tid = (u8) tid;
2699 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002700 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2701 STA_MODIFY_REMOVE_BA_TID;
2702
2703 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002704 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2705 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002706 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002707 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002708 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002709
Sara Sharon837c4da2016-01-07 16:50:45 +02002710 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002711 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002712 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2713 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002714 break;
2715 case ADD_STA_IMMEDIATE_BA_FAILURE:
2716 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2717 ret = -ENOSPC;
2718 break;
2719 default:
2720 ret = -EIO;
2721 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2722 start ? "start" : "stopp", status);
2723 break;
2724 }
2725
Sara Sharon10b2b202016-03-20 16:23:41 +02002726 if (ret)
2727 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002728
Sara Sharon10b2b202016-03-20 16:23:41 +02002729 if (start) {
2730 u8 baid;
2731
2732 mvm->rx_ba_sessions++;
2733
2734 if (!iwl_mvm_has_new_rx_api(mvm))
2735 return 0;
2736
2737 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2738 ret = -EINVAL;
2739 goto out_free;
2740 }
2741 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2742 IWL_ADD_STA_BAID_SHIFT);
2743 baid_data->baid = baid;
2744 baid_data->timeout = timeout;
2745 baid_data->last_rx = jiffies;
Kees Cook8cef5342017-10-24 02:29:37 -07002746 baid_data->rcu_ptr = &mvm->baid_map[baid];
2747 timer_setup(&baid_data->session_timer,
2748 iwl_mvm_rx_agg_session_expired, 0);
Sara Sharon10b2b202016-03-20 16:23:41 +02002749 baid_data->mvm = mvm;
2750 baid_data->tid = tid;
2751 baid_data->sta_id = mvm_sta->sta_id;
2752
2753 mvm_sta->tid_to_baid[tid] = baid;
2754 if (timeout)
2755 mod_timer(&baid_data->session_timer,
2756 TU_TO_EXP_TIME(timeout * 2));
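		/*
		 * The BA timeout is negotiated in TUs (1 TU == 1024 usec);
		 * arming the local timer at twice that interval presumably
		 * avoids expiring the session here before the peer does.
		 */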
2757
Sara Sharon3f1c4c52017-10-02 12:07:59 +03002758 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002759 /*
2760 * Protect the BA data with RCU to cover the case where our
2761 * internal RX sync mechanism times out (not that it's
2762 * supposed to happen) and we free the session data while
2763 * RX is still being processed in parallel.
2764 */
Sara Sharon35263a02016-06-21 12:12:10 +03002765 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2766 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002767 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2768 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002769 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002770 u8 baid = mvm_sta->tid_to_baid[tid];
2771
Sara Sharon60dec522016-06-21 14:14:08 +03002772 if (mvm->rx_ba_sessions > 0)
2773 /* check that restart flow didn't zero the counter */
2774 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002775 if (!iwl_mvm_has_new_rx_api(mvm))
2776 return 0;
2777
2778 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2779 return -EINVAL;
2780
2781 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2782 if (WARN_ON(!baid_data))
2783 return -EINVAL;
2784
2785 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002786 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002787 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002788 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2789 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002790 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002791 }
2792 return 0;
2793
2794out_free:
2795 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002796 return ret;
2797}
2798
Liad Kaufman9794c642015-08-19 17:34:28 +03002799int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2800 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002801{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002802 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002803 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002804 int ret;
2805 u32 status;
2806
2807 lockdep_assert_held(&mvm->mutex);
2808
2809 if (start) {
2810 mvm_sta->tfd_queue_msk |= BIT(queue);
2811 mvm_sta->tid_disable_agg &= ~BIT(tid);
2812 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002813 /* In DQA-mode the queue isn't removed on agg termination */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002814 mvm_sta->tid_disable_agg |= BIT(tid);
2815 }
2816
2817 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2818 cmd.sta_id = mvm_sta->sta_id;
2819 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002820 if (!iwl_mvm_has_new_tx_api(mvm))
2821 cmd.modify_mask = STA_MODIFY_QUEUES;
2822 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002823 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2824 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2825
2826 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002827 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2828 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002829 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002830 if (ret)
2831 return ret;
2832
Sara Sharon837c4da2016-01-07 16:50:45 +02002833 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002834 case ADD_STA_SUCCESS:
2835 break;
2836 default:
2837 ret = -EIO;
2838 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2839 start ? "start" : "stopp", status);
2840 break;
2841 }
2842
2843 return ret;
2844}
2845
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002846const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002847 IEEE80211_AC_BE,
2848 IEEE80211_AC_BK,
2849 IEEE80211_AC_BK,
2850 IEEE80211_AC_BE,
2851 IEEE80211_AC_VI,
2852 IEEE80211_AC_VI,
2853 IEEE80211_AC_VO,
2854 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002855 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002856};
2857
Johannes Berg3e56ead2013-02-15 22:23:18 +01002858static const u8 tid_to_ucode_ac[] = {
2859 AC_BE,
2860 AC_BK,
2861 AC_BK,
2862 AC_BE,
2863 AC_VI,
2864 AC_VI,
2865 AC_VO,
2866 AC_VO,
2867};
2868
Johannes Berg8ca151b2013-01-24 14:25:36 +01002869int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2870 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2871{
Johannes Berg5b577a92013-11-14 18:20:04 +01002872 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002873 struct iwl_mvm_tid_data *tid_data;
Liad Kaufmandd321622017-04-05 16:25:11 +03002874 u16 normalized_ssn;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002875 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002876 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002877
2878 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2879 return -EINVAL;
2880
Naftali Goldsteinbd800e42017-08-28 11:51:05 +03002881 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2882 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2883 IWL_ERR(mvm,
2884 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
Johannes Berg8ca151b2013-01-24 14:25:36 +01002885 mvmsta->tid_data[tid].state);
2886 return -ENXIO;
2887 }
2888
2889 lockdep_assert_held(&mvm->mutex);
2890
Liad Kaufmanbd8f3fc2018-01-17 15:25:28 +02002891 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2892 iwl_mvm_has_new_tx_api(mvm)) {
2893 u8 ac = tid_to_mac80211_ac[tid];
2894
2895 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2896 if (ret)
2897 return ret;
2898 }
2899
Arik Nemtsovb2492502014-03-13 12:21:50 +02002900 spin_lock_bh(&mvmsta->lock);
2901
2902 /* possible race condition - we entered D0i3 while starting agg */
2903 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2904 spin_unlock_bh(&mvmsta->lock);
2905 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2906 return -EIO;
2907 }
2908
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002909 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002910
Liad Kaufmancf961e12015-08-13 19:16:08 +03002911 /*
2912 * Note the possible cases:
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002913 * 1. An enabled TXQ - TXQ needs to become agg'ed
2914 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2915 * it as reserved
Liad Kaufmancf961e12015-08-13 19:16:08 +03002916 */
2917 txq_id = mvmsta->tid_data[tid].txq_id;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002918 if (txq_id == IWL_MVM_INVALID_QUEUE) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002919 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
Johannes Bergc8f54702017-06-19 23:50:31 +02002920 IWL_MVM_DQA_MIN_DATA_QUEUE,
2921 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002922 if (txq_id < 0) {
2923 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002924 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2925 goto release_locks;
2926 }
2927
2928 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2929 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002930 } else if (unlikely(mvm->queue_info[txq_id].status ==
2931 IWL_MVM_QUEUE_SHARED)) {
2932 ret = -ENXIO;
2933 IWL_DEBUG_TX_QUEUES(mvm,
2934 "Can't start tid %d agg on shared queue!\n",
2935 tid);
2936 goto release_locks;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002937 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002938
2939 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002940
Liad Kaufmancf961e12015-08-13 19:16:08 +03002941 IWL_DEBUG_TX_QUEUES(mvm,
2942 "AGG for tid %d will be on queue #%d\n",
2943 tid, txq_id);
2944
Johannes Berg8ca151b2013-01-24 14:25:36 +01002945 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002946 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002947 tid_data->txq_id = txq_id;
2948 *ssn = tid_data->ssn;
2949
2950 IWL_DEBUG_TX_QUEUES(mvm,
2951 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2952 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2953 tid_data->next_reclaimed);
2954
Liad Kaufmandd321622017-04-05 16:25:11 +03002955 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002956 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we
Liad Kaufmandd321622017-04-05 16:25:11 +03002957 * need to normalize the wrap-around of the ssn before comparing.
2958 */
2959 normalized_ssn = tid_data->ssn;
2960 if (mvm->trans->cfg->gen2)
2961 normalized_ssn &= 0xff;
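	/*
	 * Example: on gen2 HW an ssn of 0x100 and a next_reclaimed of 0x00
	 * refer to the same position once both wrap at 8 bits.
	 */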
2962
2963 if (normalized_ssn == tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002964 tid_data->state = IWL_AGG_STARTING;
2965 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2966 } else {
2967 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2968 }
2969
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002970 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002971 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002972
2973release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002974 spin_unlock(&mvm->queue_info_lock);
2975out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002976 spin_unlock_bh(&mvmsta->lock);
2977
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002978 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002979}
2980
2981int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Luca Coelho514c30692018-06-24 11:59:54 +03002982 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002983 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002984{
Johannes Berg5b577a92013-11-14 18:20:04 +01002985 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002986 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002987 unsigned int wdg_timeout =
2988 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002989 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002990 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002991 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002992 u16 ssn;
2993
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002994 struct iwl_trans_txq_scd_cfg cfg = {
2995 .sta_id = mvmsta->sta_id,
2996 .tid = tid,
2997 .frame_limit = buf_size,
2998 .aggregate = true,
2999 };
3000
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003001 /*
3002 * When the FW supports TLC_OFFLOAD, it also implements the Tx aggregation
3003 * manager, so this function should never be called in this case.
3004 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02003005 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003006 return -EINVAL;
3007
Eyal Shapiraefed6642014-09-14 15:58:53 +03003008 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3009 != IWL_MAX_TID_COUNT);
3010
Johannes Berg8ca151b2013-01-24 14:25:36 +01003011 spin_lock_bh(&mvmsta->lock);
3012 ssn = tid_data->ssn;
3013 queue = tid_data->txq_id;
3014 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03003015 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003016 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02003017 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003018 spin_unlock_bh(&mvmsta->lock);
3019
Sara Sharon34e10862017-02-23 13:15:07 +02003020 if (iwl_mvm_has_new_tx_api(mvm)) {
3021 /*
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003022 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3023 * would have failed, so if we are here there is no need to
3024 * allocate a queue.
3025 * However, if the aggregation size is different from the default
3026 * size, the scheduler should be reconfigured.
3027 * We cannot do this with the new TX API, so return unsupported
3028 * for now, until it is offloaded to the firmware.
3029 * Note that if the SCD default value changes, this condition
3030 * should be updated as well.
Sara Sharon34e10862017-02-23 13:15:07 +02003031 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003032 if (buf_size < IWL_FRAME_LIMIT)
Sara Sharon34e10862017-02-23 13:15:07 +02003033 return -ENOTSUPP;
3034
3035 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3036 if (ret)
3037 return -EIO;
3038 goto out;
3039 }
3040
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02003041 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01003042
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02003043 spin_lock_bh(&mvm->queue_info_lock);
3044 queue_status = mvm->queue_info[queue].status;
3045 spin_unlock_bh(&mvm->queue_info_lock);
3046
Johannes Bergc8f54702017-06-19 23:50:31 +02003047 /* Maybe there is no need to even alloc a queue... */
3048 if (queue_status == IWL_MVM_QUEUE_READY)
3049 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03003050
Johannes Bergc8f54702017-06-19 23:50:31 +02003051 /*
3052 * Only reconfig the SCD for the queue if the window size has
3053 * changed from current (become smaller)
3054 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003055 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03003056 /*
Johannes Bergc8f54702017-06-19 23:50:31 +02003057 * If reconfiguring an existing queue, it must first be
3058 * drained.
Liad Kaufmancf961e12015-08-13 19:16:08 +03003059 */
Johannes Bergc8f54702017-06-19 23:50:31 +02003060 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3061 BIT(queue));
3062 if (ret) {
3063 IWL_ERR(mvm,
3064 "Error draining queue before reconfig\n");
3065 return ret;
3066 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03003067
Johannes Bergc8f54702017-06-19 23:50:31 +02003068 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3069 mvmsta->sta_id, tid,
3070 buf_size, ssn);
3071 if (ret) {
3072 IWL_ERR(mvm,
3073 "Error reconfiguring TXQ #%d\n", queue);
3074 return ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03003075 }
3076 }
3077
3078 if (alloc_queue)
3079 iwl_mvm_enable_txq(mvm, queue,
3080 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
3081 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03003082
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02003083 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3084 if (queue_status != IWL_MVM_QUEUE_SHARED) {
3085 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3086 if (ret)
3087 return -EIO;
3088 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003089
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003090 /* No need to mark as reserved */
3091 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03003092 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003093 spin_unlock_bh(&mvm->queue_info_lock);
3094
Sara Sharon34e10862017-02-23 13:15:07 +02003095out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003096 /*
3097 * Even though in theory the peer could have different
3098 * aggregation reorder buffer sizes for different sessions,
3099 * our ucode doesn't allow for that and has a global limit
3100 * for each station. Therefore, use the minimum of all the
3101 * aggregation sessions and our default value.
3102 */
3103 mvmsta->max_agg_bufsize =
3104 min(mvmsta->max_agg_bufsize, buf_size);
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003105 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003106
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03003107 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3108 sta->addr, tid);
3109
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003110 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003111}
3112
Sara Sharon34e10862017-02-23 13:15:07 +02003113static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3114 struct iwl_mvm_sta *mvmsta,
Avraham Stern4b387902018-03-07 10:41:18 +02003115 struct iwl_mvm_tid_data *tid_data)
Sara Sharon34e10862017-02-23 13:15:07 +02003116{
Avraham Stern4b387902018-03-07 10:41:18 +02003117 u16 txq_id = tid_data->txq_id;
3118
Sara Sharon34e10862017-02-23 13:15:07 +02003119 if (iwl_mvm_has_new_tx_api(mvm))
3120 return;
3121
3122 spin_lock_bh(&mvm->queue_info_lock);
3123 /*
3124 * The TXQ is marked as reserved only if no traffic came through yet.
3125 * This means no traffic has been sent on this TID (agg'd or not), so
3126 * we no longer have use for the queue. Since it hasn't even been
3127 * allocated through iwl_mvm_enable_txq, we can just mark it back as
3128 * free.
3129 */
Avraham Stern4b387902018-03-07 10:41:18 +02003130 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
Sara Sharon34e10862017-02-23 13:15:07 +02003131 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Avraham Stern4b387902018-03-07 10:41:18 +02003132 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3133 }
Sara Sharon34e10862017-02-23 13:15:07 +02003134
3135 spin_unlock_bh(&mvm->queue_info_lock);
3136}
3137
Johannes Berg8ca151b2013-01-24 14:25:36 +01003138int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3139 struct ieee80211_sta *sta, u16 tid)
3140{
Johannes Berg5b577a92013-11-14 18:20:04 +01003141 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003142 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3143 u16 txq_id;
3144 int err;
3145
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02003146 /*
3147 * If mac80211 is cleaning its state, then say that we finished since
3148 * our state has been cleared anyway.
3149 */
3150 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3151 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3152 return 0;
3153 }
3154
Johannes Berg8ca151b2013-01-24 14:25:36 +01003155 spin_lock_bh(&mvmsta->lock);
3156
3157 txq_id = tid_data->txq_id;
3158
3159 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3160 mvmsta->sta_id, tid, txq_id, tid_data->state);
3161
Eyal Shapiraefed6642014-09-14 15:58:53 +03003162 mvmsta->agg_tids &= ~BIT(tid);
3163
Avraham Stern4b387902018-03-07 10:41:18 +02003164 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003165
Johannes Berg8ca151b2013-01-24 14:25:36 +01003166 switch (tid_data->state) {
3167 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01003168 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003169
3170 IWL_DEBUG_TX_QUEUES(mvm,
3171 "ssn = %d, next_recl = %d\n",
3172 tid_data->ssn, tid_data->next_reclaimed);
3173
Johannes Berg8ca151b2013-01-24 14:25:36 +01003174 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02003175 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02003176 spin_unlock_bh(&mvmsta->lock);
3177
3178 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3179
3180 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
Johannes Bergf7f89e72014-08-05 15:24:44 +02003181 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003182 case IWL_AGG_STARTING:
3183 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3184 /*
3185 * The agg session has been stopped before it was set up. This
3186 * can happen when the AddBA timer times out, for example.
3187 */
3188
3189 /* No barriers since we are under mutex */
3190 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003191
3192 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3193 tid_data->state = IWL_AGG_OFF;
3194 err = 0;
3195 break;
3196 default:
3197 IWL_ERR(mvm,
3198 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3199 mvmsta->sta_id, tid, tid_data->state);
3200 IWL_ERR(mvm,
3201 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3202 err = -EINVAL;
3203 }
3204
3205 spin_unlock_bh(&mvmsta->lock);
3206
3207 return err;
3208}
3209
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003210int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3211 struct ieee80211_sta *sta, u16 tid)
3212{
Johannes Berg5b577a92013-11-14 18:20:04 +01003213 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003214 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3215 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02003216 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003217
3218 /*
3219 * First set the agg state to OFF to avoid calling
3220 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3221 */
3222 spin_lock_bh(&mvmsta->lock);
3223 txq_id = tid_data->txq_id;
3224 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3225 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02003226 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003227 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03003228 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003229 spin_unlock_bh(&mvmsta->lock);
3230
Avraham Stern4b387902018-03-07 10:41:18 +02003231 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003232
Johannes Bergb6658ff2013-07-24 13:55:51 +02003233 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02003234 iwl_mvm_drain_sta(mvm, mvmsta, true);
Sara Sharond6d517b2017-03-06 10:16:11 +02003235
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003236 if (iwl_mvm_has_new_tx_api(mvm)) {
3237 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3238 BIT(tid), 0))
3239 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02003240 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003241 } else {
3242 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3243 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02003244 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003245 }
Sara Sharond6d517b2017-03-06 10:16:11 +02003246
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02003247 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003248
Johannes Bergf7f89e72014-08-05 15:24:44 +02003249 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
Johannes Bergb6658ff2013-07-24 13:55:51 +02003250 }
3251
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003252 return 0;
3253}
3254
Johannes Berg8ca151b2013-01-24 14:25:36 +01003255static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3256{
Johannes Berg2dc2a152015-06-16 17:09:18 +02003257 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003258
3259 lockdep_assert_held(&mvm->mutex);
3260
Johannes Berg2dc2a152015-06-16 17:09:18 +02003261 /* Pick the unused key offset with the highest 'deleted'
3262 * counter. Every time a key is deleted, all the counters
3263 * are incremented and the one that was just deleted is
3264 * reset to zero. Thus, the highest counter is the one
3265 * that was deleted longest ago. Pick that one.
3266 */
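	/*
	 * Example (hypothetical counters): if offsets 1 and 3 are both
	 * unused, with fw_key_deleted[1] == 7 and fw_key_deleted[3] == 4,
	 * offset 1 was freed longer ago and is picked first.
	 */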
3267 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3268 if (test_bit(i, mvm->fw_key_table))
3269 continue;
3270 if (mvm->fw_key_deleted[i] > max) {
3271 max = mvm->fw_key_deleted[i];
3272 max_offs = i;
3273 }
3274 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003275
Johannes Berg2dc2a152015-06-16 17:09:18 +02003276 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003277 return STA_KEY_IDX_INVALID;
3278
Johannes Berg2dc2a152015-06-16 17:09:18 +02003279 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003280}
3281
Johannes Berg5f7a1842015-12-11 09:36:10 +01003282static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3283 struct ieee80211_vif *vif,
3284 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003285{
Johannes Berg5b530e92014-12-23 16:00:17 +01003286 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003287
Johannes Berg5f7a1842015-12-11 09:36:10 +01003288 if (sta)
3289 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003290
3291 /*
3292 * The device expects GTKs for station interfaces to be
3293 * installed as GTKs for the AP station. If we have no
3294 * station ID, then use the AP's station ID.
3295 */
3296 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003297 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Avri Altman9513c5e2015-10-19 16:29:11 +02003298 u8 sta_id = mvmvif->ap_sta_id;
3299
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003300 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3301 lockdep_is_held(&mvm->mutex));
3302
Avri Altman9513c5e2015-10-19 16:29:11 +02003303 /*
3304 * It is possible that the 'sta' parameter is NULL,
3305 * for example when a GTK is removed - the sta_id will then
3306 * be the AP ID, and no station was passed by mac80211.
3307 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003308 if (IS_ERR_OR_NULL(sta))
3309 return NULL;
3310
3311 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02003312 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003313
Johannes Berg5f7a1842015-12-11 09:36:10 +01003314 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003315}
3316
3317static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
David Spinadel85aeb582017-03-30 19:43:53 +03003318 u32 sta_id,
Sara Sharon45c458b2016-11-09 15:43:26 +02003319 struct ieee80211_key_conf *key, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003320 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003321 u8 key_offset, bool mfp)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003322{
Sara Sharon45c458b2016-11-09 15:43:26 +02003323 union {
3324 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3325 struct iwl_mvm_add_sta_key_cmd cmd;
3326 } u = {};
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003327 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01003328 int ret;
3329 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003330 u16 keyidx;
Sara Sharon45c458b2016-11-09 15:43:26 +02003331 u64 pn = 0;
3332 int i, size;
3333 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3334 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003335
David Spinadel85aeb582017-03-30 19:43:53 +03003336 if (sta_id == IWL_MVM_INVALID_STA)
3337 return -EINVAL;
3338
Sara Sharon45c458b2016-11-09 15:43:26 +02003339 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
Johannes Berg8ca151b2013-01-24 14:25:36 +01003340 STA_KEY_FLG_KEYID_MSK;
3341 key_flags = cpu_to_le16(keyidx);
3342 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3343
Sara Sharon45c458b2016-11-09 15:43:26 +02003344 switch (key->cipher) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003345 case WLAN_CIPHER_SUITE_TKIP:
3346 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003347 if (new_api) {
3348 memcpy((void *)&u.cmd.tx_mic_key,
3349 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3350 IWL_MIC_KEY_SIZE);
3351
3352 memcpy((void *)&u.cmd.rx_mic_key,
3353 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3354 IWL_MIC_KEY_SIZE);
3355 pn = atomic64_read(&key->tx_pn);
3356
3357 } else {
3358 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3359 for (i = 0; i < 5; i++)
3360 u.cmd_v1.tkip_rx_ttak[i] =
3361 cpu_to_le16(tkip_p1k[i]);
3362 }
3363 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003364 break;
3365 case WLAN_CIPHER_SUITE_CCMP:
3366 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Sara Sharon45c458b2016-11-09 15:43:26 +02003367 memcpy(u.cmd.common.key, key->key, key->keylen);
3368 if (new_api)
3369 pn = atomic64_read(&key->tx_pn);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003370 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01003371 case WLAN_CIPHER_SUITE_WEP104:
3372 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05003373 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01003374 case WLAN_CIPHER_SUITE_WEP40:
3375 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003376 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
Johannes Bergba3943b2014-11-12 23:54:48 +01003377 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03003378 case WLAN_CIPHER_SUITE_GCMP_256:
3379 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3380 /* fall through */
3381 case WLAN_CIPHER_SUITE_GCMP:
3382 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003383 memcpy(u.cmd.common.key, key->key, key->keylen);
3384 if (new_api)
3385 pn = atomic64_read(&key->tx_pn);
Ayala Beker2a53d162016-04-07 16:21:57 +03003386 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003387 default:
Max Stepanove36e5432013-08-27 19:56:13 +03003388 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
Sara Sharon45c458b2016-11-09 15:43:26 +02003389 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003390 }
3391
Johannes Bergba3943b2014-11-12 23:54:48 +01003392 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003393 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003394 if (mfp)
3395 key_flags |= cpu_to_le16(STA_KEY_MFP);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003396
Sara Sharon45c458b2016-11-09 15:43:26 +02003397 u.cmd.common.key_offset = key_offset;
3398 u.cmd.common.key_flags = key_flags;
David Spinadel85aeb582017-03-30 19:43:53 +03003399 u.cmd.common.sta_id = sta_id;
Sara Sharon45c458b2016-11-09 15:43:26 +02003400
3401 if (new_api) {
3402 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3403 size = sizeof(u.cmd);
3404 } else {
3405 size = sizeof(u.cmd_v1);
3406 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003407
3408 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003409 if (cmd_flags & CMD_ASYNC)
Sara Sharon45c458b2016-11-09 15:43:26 +02003410 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3411 &u.cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003412 else
Sara Sharon45c458b2016-11-09 15:43:26 +02003413 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3414 &u.cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003415
3416 switch (status) {
3417 case ADD_STA_SUCCESS:
3418 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3419 break;
3420 default:
3421 ret = -EIO;
3422 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3423 break;
3424 }
3425
3426 return ret;
3427}
3428
3429static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3430 struct ieee80211_key_conf *keyconf,
3431 u8 sta_id, bool remove_key)
3432{
3433 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3434
3435 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03003436 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3437 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3438 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3439 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3440 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3441 return -EINVAL;
3442
3443 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3444 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01003445 return -EINVAL;
3446
3447 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3448 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3449
3450 if (remove_key) {
3451 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3452 } else {
3453 struct ieee80211_key_seq seq;
3454 const u8 *pn;
3455
Ayala Bekeraa950522016-06-01 00:28:09 +03003456 switch (keyconf->cipher) {
3457 case WLAN_CIPHER_SUITE_AES_CMAC:
3458 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3459 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03003460 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3461 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3462 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3463 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03003464 default:
3465 return -EINVAL;
3466 }
3467
Ayala Beker8e160ab2016-04-11 11:37:38 +03003468 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3469 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3470 igtk_cmd.ctrl_flags |=
3471 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003472 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3473 pn = seq.aes_cmac.pn;
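		/*
		 * The 6-byte packet number is kept in big-endian order
		 * (pn[0] is the most significant byte); assemble it into
		 * the 48-bit receive_seq_cnt counter below.
		 */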
3474 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3475 ((u64) pn[4] << 8) |
3476 ((u64) pn[3] << 16) |
3477 ((u64) pn[2] << 24) |
3478 ((u64) pn[1] << 32) |
3479 ((u64) pn[0] << 40));
3480 }
3481
3482 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3483 remove_key ? "removing" : "installing",
3484 igtk_cmd.sta_id);
3485
Ayala Beker8e160ab2016-04-11 11:37:38 +03003486 if (!iwl_mvm_has_new_rx_api(mvm)) {
3487 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3488 .ctrl_flags = igtk_cmd.ctrl_flags,
3489 .key_id = igtk_cmd.key_id,
3490 .sta_id = igtk_cmd.sta_id,
3491 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3492 };
3493
3494 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3495 ARRAY_SIZE(igtk_cmd_v1.igtk));
3496 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3497 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3498 }
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003499 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003500 sizeof(igtk_cmd), &igtk_cmd);
3501}
3502
3503
3504static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3505 struct ieee80211_vif *vif,
3506 struct ieee80211_sta *sta)
3507{
Johannes Berg5b530e92014-12-23 16:00:17 +01003508 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003509
3510 if (sta)
3511 return sta->addr;
3512
3513 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003514 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003515 u8 sta_id = mvmvif->ap_sta_id;
3516 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3517 lockdep_is_held(&mvm->mutex));
3518 return sta->addr;
3519 }
3520
3521
3522 return NULL;
3523}
3524
Johannes Berg2f6319d2014-11-12 23:39:56 +01003525static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3526 struct ieee80211_vif *vif,
3527 struct ieee80211_sta *sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01003528 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003529 u8 key_offset,
Johannes Bergba3943b2014-11-12 23:54:48 +01003530 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003531{
Johannes Berg8ca151b2013-01-24 14:25:36 +01003532 int ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003533 const u8 *addr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003534 struct ieee80211_key_seq seq;
3535 u16 p1k[5];
David Spinadel85aeb582017-03-30 19:43:53 +03003536 u32 sta_id;
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003537 bool mfp = false;
David Spinadel85aeb582017-03-30 19:43:53 +03003538
3539 if (sta) {
3540 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3541
3542 sta_id = mvm_sta->sta_id;
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003543 mfp = sta->mfp;
David Spinadel85aeb582017-03-30 19:43:53 +03003544 } else if (vif->type == NL80211_IFTYPE_AP &&
3545 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3546 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3547
3548 sta_id = mvmvif->mcast_sta.sta_id;
3549 } else {
3550 IWL_ERR(mvm, "Failed to find station id\n");
3551 return -EINVAL;
3552 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003553
Johannes Berg8ca151b2013-01-24 14:25:36 +01003554 switch (keyconf->cipher) {
3555 case WLAN_CIPHER_SUITE_TKIP:
3556 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3557 /* get phase 1 key from mac80211 */
3558 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3559 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
David Spinadel85aeb582017-03-30 19:43:53 +03003560 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003561 seq.tkip.iv32, p1k, 0, key_offset,
3562 mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003563 break;
3564 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergba3943b2014-11-12 23:54:48 +01003565 case WLAN_CIPHER_SUITE_WEP40:
3566 case WLAN_CIPHER_SUITE_WEP104:
Ayala Beker2a53d162016-04-07 16:21:57 +03003567 case WLAN_CIPHER_SUITE_GCMP:
3568 case WLAN_CIPHER_SUITE_GCMP_256:
David Spinadel85aeb582017-03-30 19:43:53 +03003569 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003570 0, NULL, 0, key_offset, mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003571 break;
3572 default:
David Spinadel85aeb582017-03-30 19:43:53 +03003573 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003574 0, NULL, 0, key_offset, mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003575 }
3576
Johannes Berg8ca151b2013-01-24 14:25:36 +01003577 return ret;
3578}
3579
Johannes Berg2f6319d2014-11-12 23:39:56 +01003580static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
Johannes Bergba3943b2014-11-12 23:54:48 +01003581 struct ieee80211_key_conf *keyconf,
3582 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003583{
Sara Sharon45c458b2016-11-09 15:43:26 +02003584 union {
3585 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3586 struct iwl_mvm_add_sta_key_cmd cmd;
3587 } u = {};
3588 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3589 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003590 __le16 key_flags;
Sara Sharon45c458b2016-11-09 15:43:26 +02003591 int ret, size;
Johannes Berg79920742014-11-03 15:43:04 +01003592 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003593
Sara Sharone4f13ad2018-01-15 13:50:59 +02003594 /* This is a valid situation for GTK removal */
David Spinadel85aeb582017-03-30 19:43:53 +03003595 if (sta_id == IWL_MVM_INVALID_STA)
Sara Sharone4f13ad2018-01-15 13:50:59 +02003596 return 0;
David Spinadel85aeb582017-03-30 19:43:53 +03003597
Emmanuel Grumbach8115efb2013-02-05 10:08:35 +02003598 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3599 STA_KEY_FLG_KEYID_MSK);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003600 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3601 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3602
Johannes Bergba3943b2014-11-12 23:54:48 +01003603 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003604 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3605
Sara Sharon45c458b2016-11-09 15:43:26 +02003606 /*
3607 * The fields assigned here are in the same location at the start
3608 * of the command, so we can do this union trick.
3609 */
3610 u.cmd.common.key_flags = key_flags;
3611 u.cmd.common.key_offset = keyconf->hw_key_idx;
3612 u.cmd.common.sta_id = sta_id;
3613
3614 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003615
Johannes Berg8ca151b2013-01-24 14:25:36 +01003616 status = ADD_STA_SUCCESS;
Sara Sharon45c458b2016-11-09 15:43:26 +02003617 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3618 &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003619
3620 switch (status) {
3621 case ADD_STA_SUCCESS:
3622 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3623 break;
3624 default:
3625 ret = -EIO;
3626 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3627 break;
3628 }
3629
3630 return ret;
3631}
3632
Johannes Berg2f6319d2014-11-12 23:39:56 +01003633int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3634 struct ieee80211_vif *vif,
3635 struct ieee80211_sta *sta,
3636 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003637 u8 key_offset)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003638{
Johannes Bergba3943b2014-11-12 23:54:48 +01003639 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003640 struct iwl_mvm_sta *mvm_sta;
David Spinadel85aeb582017-03-30 19:43:53 +03003641 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003642 int ret;
Matti Gottlieb11828db2015-06-01 15:15:11 +03003643 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
Johannes Berg2f6319d2014-11-12 23:39:56 +01003644
3645 lockdep_assert_held(&mvm->mutex);
3646
David Spinadel85aeb582017-03-30 19:43:53 +03003647 if (vif->type != NL80211_IFTYPE_AP ||
3648 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3649 /* Get the station id from the mvm local station table */
3650 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3651 if (!mvm_sta) {
3652 IWL_ERR(mvm, "Failed to find station\n");
Johannes Berg2f6319d2014-11-12 23:39:56 +01003653 return -EINVAL;
3654 }
David Spinadel85aeb582017-03-30 19:43:53 +03003655 sta_id = mvm_sta->sta_id;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003656
David Spinadel85aeb582017-03-30 19:43:53 +03003657 /*
3658 * It is possible that the 'sta' parameter is NULL, and thus
Beni Leve829b172018-02-20 13:41:54 +02003659 * there is a need to retrieve the sta from the local station
David Spinadel85aeb582017-03-30 19:43:53 +03003660 * table.
3661 */
3662 if (!sta) {
3663 sta = rcu_dereference_protected(
3664 mvm->fw_id_to_mac_id[sta_id],
3665 lockdep_is_held(&mvm->mutex));
3666 if (IS_ERR_OR_NULL(sta)) {
3667 IWL_ERR(mvm, "Invalid station id\n");
3668 return -EINVAL;
3669 }
3670 }
3671
3672 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3673 return -EINVAL;
Beni Leve829b172018-02-20 13:41:54 +02003674 } else {
3675 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3676
3677 sta_id = mvmvif->mcast_sta.sta_id;
3678 }
3679
3680 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3681 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3682 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3683 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3684 goto end;
David Spinadel85aeb582017-03-30 19:43:53 +03003685 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003686
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003687 /* If the key_offset is not pre-assigned, we need to find a
3688 * new offset to use. In normal cases, the offset is not
3689 * pre-assigned, but during HW_RESTART we want to reuse the
3690 * same indices, so we pass them when this function is called.
3691 *
3692 * In D3 entry, we need to hardcode the indices (because the
3693 * firmware hardcodes the PTK offset to 0). In this case, we
3694 * need to make sure we don't overwrite the hw_key_idx in the
3695 * keyconf structure, because otherwise we cannot configure
3696 * the original ones back when resuming.
3697 */
3698 if (key_offset == STA_KEY_IDX_INVALID) {
3699 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3700 if (key_offset == STA_KEY_IDX_INVALID)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003701 return -ENOSPC;
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003702 keyconf->hw_key_idx = key_offset;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003703 }
3704
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003705 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003706 if (ret)
Johannes Bergba3943b2014-11-12 23:54:48 +01003707 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003708
3709 /*
3710 * For WEP, the same key is used for multicast and unicast. Upload it
3711 * again, using the same key offset, and now pointing the other one
3712 * to the same key slot (offset).
3713 * If this fails, remove the original as well.
3714 */
David Spinadel85aeb582017-03-30 19:43:53 +03003715 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3716 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3717 sta) {
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003718 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3719 key_offset, !mcast);
Johannes Bergba3943b2014-11-12 23:54:48 +01003720 if (ret) {
Johannes Bergba3943b2014-11-12 23:54:48 +01003721 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003722 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003723 }
3724 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003725
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003726 __set_bit(key_offset, mvm->fw_key_table);
3727
Johannes Berg2f6319d2014-11-12 23:39:56 +01003728end:
3729 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3730 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
Matti Gottlieb11828db2015-06-01 15:15:11 +03003731 sta ? sta->addr : zero_addr, ret);
Johannes Berg2f6319d2014-11-12 23:39:56 +01003732 return ret;
3733}
3734
3735int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3736 struct ieee80211_vif *vif,
3737 struct ieee80211_sta *sta,
3738 struct ieee80211_key_conf *keyconf)
3739{
Johannes Bergba3943b2014-11-12 23:54:48 +01003740 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003741 struct iwl_mvm_sta *mvm_sta;
Sara Sharon0ae98812017-01-04 14:53:58 +02003742 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2dc2a152015-06-16 17:09:18 +02003743 int ret, i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003744
3745 lockdep_assert_held(&mvm->mutex);
3746
Johannes Berg5f7a1842015-12-11 09:36:10 +01003747 /* Get the station from the mvm local station table */
3748 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
Luca Coelho71793b7d2017-03-30 12:04:47 +03003749 if (mvm_sta)
3750 sta_id = mvm_sta->sta_id;
David Spinadel85aeb582017-03-30 19:43:53 +03003751 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3752 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3753
Johannes Berg2f6319d2014-11-12 23:39:56 +01003754
3755 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3756 keyconf->keyidx, sta_id);
3757
Luca Coelho71793b7d2017-03-30 12:04:47 +03003758 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3759 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3760 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
Johannes Berg2f6319d2014-11-12 23:39:56 +01003761 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3762
3763 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3764 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3765 keyconf->hw_key_idx);
3766 return -ENOENT;
3767 }
3768
Johannes Berg2dc2a152015-06-16 17:09:18 +02003769 /* track which key was deleted last */
3770 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3771 if (mvm->fw_key_deleted[i] < U8_MAX)
3772 mvm->fw_key_deleted[i]++;
3773 }
3774 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3775
David Spinadel85aeb582017-03-30 19:43:53 +03003776 if (sta && !mvm_sta) {
Johannes Berg2f6319d2014-11-12 23:39:56 +01003777 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3778 return 0;
3779 }
3780
Johannes Bergba3943b2014-11-12 23:54:48 +01003781 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3782 if (ret)
3783 return ret;
3784
3785 /* delete WEP key twice to get rid of (now useless) offset */
3786 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3787 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3788 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3789
3790 return ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003791}
3792
Johannes Berg8ca151b2013-01-24 14:25:36 +01003793void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3794 struct ieee80211_vif *vif,
3795 struct ieee80211_key_conf *keyconf,
3796 struct ieee80211_sta *sta, u32 iv32,
3797 u16 *phase1key)
3798{
Beni Levc3eb5362013-02-06 17:22:18 +02003799 struct iwl_mvm_sta *mvm_sta;
Johannes Bergba3943b2014-11-12 23:54:48 +01003800 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003801 bool mfp = sta ? sta->mfp : false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003802
Beni Levc3eb5362013-02-06 17:22:18 +02003803 rcu_read_lock();
3804
Johannes Berg5f7a1842015-12-11 09:36:10 +01003805 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3806 if (WARN_ON_ONCE(!mvm_sta))
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003807 goto unlock;
David Spinadel85aeb582017-03-30 19:43:53 +03003808 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003809 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3810 mfp);
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003811
3812 unlock:
Beni Levc3eb5362013-02-06 17:22:18 +02003813 rcu_read_unlock();
Johannes Berg8ca151b2013-01-24 14:25:36 +01003814}
3815
Johannes Berg9cc40712013-02-15 22:47:48 +01003816void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3817 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003818{
Johannes Berg5b577a92013-11-14 18:20:04 +01003819 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003820 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003821 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003822 .sta_id = mvmsta->sta_id,
Emmanuel Grumbach5af01772013-06-09 12:59:24 +03003823 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
Johannes Berg9cc40712013-02-15 22:47:48 +01003824 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003825 };
3826 int ret;
3827
Sara Sharon854c5702016-01-26 13:17:47 +02003828 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3829 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003830 if (ret)
3831 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3832}
3833
Johannes Berg9cc40712013-02-15 22:47:48 +01003834void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3835 struct ieee80211_sta *sta,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003836 enum ieee80211_frame_release_type reason,
Johannes Berg3e56ead2013-02-15 22:23:18 +01003837 u16 cnt, u16 tids, bool more_data,
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003838 bool single_sta_queue)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003839{
Johannes Berg5b577a92013-11-14 18:20:04 +01003840 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003841 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003842 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003843 .sta_id = mvmsta->sta_id,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003844 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3845 .sleep_tx_count = cpu_to_le16(cnt),
Johannes Berg9cc40712013-02-15 22:47:48 +01003846 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003847 };
Johannes Berg3e56ead2013-02-15 22:23:18 +01003848 int tid, ret;
3849 unsigned long _tids = tids;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003850
Johannes Berg3e56ead2013-02-15 22:23:18 +01003851 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3852 * Note that this field is reserved and unused by firmware not
3853 * supporting GO uAPSD, so it's safe to always do this.
3854 */
3855 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3856 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
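	/*
	 * Example: tids == 0x05 (TIDs 0 and 2) sets the AC_BE and AC_BK
	 * bits in awake_acs, per the tid_to_ucode_ac[] mapping above.
	 */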
3857
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003858 /* If we're releasing frames from aggregation or dqa queues, then check
3859 * if all the queues that we're releasing frames from, combined, have:
Johannes Berg3e56ead2013-02-15 22:23:18 +01003860 * - more frames than the service period, in which case more_data
3861 * needs to be set
3862 * - fewer than 'cnt' frames, in which case we need to adjust the
3863 * firmware command (but do that unconditionally)
3864 */
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003865 if (single_sta_queue) {
Johannes Berg3e56ead2013-02-15 22:23:18 +01003866 int remaining = cnt;
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003867 int sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003868
3869 spin_lock_bh(&mvmsta->lock);
3870 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3871 struct iwl_mvm_tid_data *tid_data;
3872 u16 n_queued;
3873
3874 tid_data = &mvmsta->tid_data[tid];
Johannes Berg3e56ead2013-02-15 22:23:18 +01003875
Liad Kaufmandd321622017-04-05 16:25:11 +03003876 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003877 if (n_queued > remaining) {
3878 more_data = true;
3879 remaining = 0;
3880 break;
3881 }
3882 remaining -= n_queued;
3883 }
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003884 sleep_tx_count = cnt - remaining;
3885 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3886 mvmsta->sleep_tx_count = sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003887 spin_unlock_bh(&mvmsta->lock);
3888
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003889 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003890 if (WARN_ON(cnt - remaining == 0)) {
3891 ieee80211_sta_eosp(sta);
3892 return;
3893 }
3894 }
3895
3896 /* Note: this is ignored by firmware not supporting GO uAPSD */
3897 if (more_data)
Sara Sharonced19f22017-02-06 19:09:32 +02003898 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003899
3900 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3901 mvmsta->next_status_eosp = true;
Sara Sharonced19f22017-02-06 19:09:32 +02003902 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003903 } else {
Sara Sharonced19f22017-02-06 19:09:32 +02003904 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003905 }
3906
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +02003907 /* block the Tx queues until the FW has updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
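
/*
 * Illustrative sketch, not driver code: the service-period accounting
 * above, restated over a plain array of per-TID queue depths so the two
 * outcomes are easy to see. The helper name and parameters here are
 * hypothetical. With cnt = 4 and depths {2, 1}, remaining ends up at 1,
 * so 3 frames are released and more_data stays false; with depths
 * {3, 5}, the second queue exceeds what is left of the service period,
 * so all 4 frames are released and more_data is set.
 */
static inline u16 example_sleep_tx_count(const u16 *queued_per_tid,
					 int n_tids, int cnt,
					 bool *more_data)
{
	int remaining = cnt;
	int i;

	for (i = 0; i < n_tids; i++) {
		if (queued_per_tid[i] > remaining) {
			/* more frames queued than fit in the SP */
			*more_data = true;
			remaining = 0;
			break;
		}
		remaining -= queued_per_tid[i];
	}

	/* never ask the FW to release more frames than are queued */
	return cnt - remaining;
}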

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
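
/*
 * Illustrative sketch, not driver code: the lookup pattern used above,
 * pulled out as a hypothetical helper. fw_id_to_mac_id[] is
 * RCU-protected because stations can be removed concurrently, so both
 * the dereference and any use of the station pointer must stay inside
 * a single RCU read-side section.
 */
static void example_with_fw_sta(struct iwl_mvm *mvm, u32 sta_id,
				void (*fn)(struct ieee80211_sta *sta))
{
	struct ieee80211_sta *sta;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))	/* entries may hold an ERR_PTR */
		fn(sta);
	rcu_read_unlock();
}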

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also the multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}
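
/*
 * Illustrative sketch, not driver code: the per-station filter used in
 * the loop above, pulled out as a hypothetical helper. Each station
 * caches the FW id/color of its MAC context, so membership in a vif
 * reduces to comparing that cached value against the vif's own
 * id/color pair.
 */
static inline bool example_sta_belongs_to_vif(struct iwl_mvm_sta *mvm_sta,
					      struct iwl_mvm_vif *mvmvif)
{
	return mvm_sta->mac_id_n_color ==
	       FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
}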

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so
	 * align the SSN to the same 8-bit space before the wrap-around
	 * subtraction to make sure we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
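
/*
 * Illustrative sketch, not driver code: the gen2 path above with the
 * numbers spelled out, as a hypothetical helper. ieee80211_sn_sub()
 * subtracts modulo the 12-bit SN space, so the driver-side SN must
 * first be folded into the same 8-bit space the FW index lives in.
 * For example, a driver SN of 0x1fe against next_reclaimed = 0xf0
 * yields 0xfe - 0xf0 = 14 queued frames once masked; without the mask
 * the subtraction would report 0x10e (270) instead.
 */
static inline u16 example_tid_queued_gen2(u16 seq_number, u8 next_reclaimed)
{
	u16 sn = IEEE80211_SEQ_TO_SN(seq_number);

	sn &= 0xff;		/* fold into the FW's 8-bit index space */
	return ieee80211_sn_sub(sn, next_reclaimed);
}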