/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);

/*
 * A newer version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

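/*
 * Find a free station ID: walk fw_id_to_mac_id (protected by mvm->mutex,
 * so no RCU read lock is needed) and return the first unused index,
 * skipping IDs reserved for the given interface type.
 */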
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

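/*
 * RX BA session inactivity timer: if the last RX was recent enough,
 * re-arm the timer for the remainder of the window; otherwise tell
 * mac80211 the RX BA session timed out so it can tear it down.
 */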
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

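/*
 * Disable a TX queue and drop its MAC-queue mapping. On the new TX path
 * (TVQM) the queue is simply freed in the transport; on the DQA path the
 * queue is only torn down once no TID is mapped to it anymore, and the
 * firmware is told via SCD_QUEUE_CFG.
 */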
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
	int ret;

	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
		return -EINVAL;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);

		if (remove_mac_queue)
			mvm->hw_queue_to_mac80211[queue] &=
				~BIT(mac80211_queue);

		spin_unlock_bh(&mvm->queue_info_lock);

		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC queue
	 * from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->hw_queue_to_mac80211[queue] &=
			~BIT(mac80211_queue);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap ||
	     mvm->hw_queue_to_mac80211[queue],
	     "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
	     queue, mvm->hw_queue_to_mac80211[queue],
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->hw_queue_to_mac80211[queue] = 0;

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

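/*
 * Return a bitmap of the TIDs mapped to @queue that currently have an
 * aggregation session open (IWL_AGG_ON) with this station.
 */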
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

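/*
 * Free a queue that was marked inactive: detach it from its current
 * station (disabling any open aggregations first) and disable it in
 * hardware so it can be reallocated.
 */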
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

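/*
 * Pick an already-allocated DATA queue for a new TID to share, based on
 * the ACs served by the station's existing queues (the preference order
 * is documented inline below).
 */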
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

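/*
 * Scan the [minq, maxq] range for a queue with no TIDs mapped and a FREE
 * status; only used on the DQA path (never with the new TX API).
 */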
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

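/*
 * Allocate a TX queue in the transport for the given station/TID on the
 * new (TVQM) TX path. Management frames use the dedicated IWL_MGMT_TID
 * and IWL_MGMT_QUEUE_SIZE; on success the mac80211 queue is mapped to
 * the newly allocated TXQ.
 */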
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = IWL_DEFAULT_QUEUE_SIZE;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = IWL_MGMT_QUEUE_SIZE;
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
			    queue, mvm->hw_queue_to_mac80211[queue]);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

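/*
 * Record the STA/TID -> queue mapping in mvm->queue_info and in the
 * mac80211 queue map. Returns true if this is the first TID on the
 * queue, i.e. the caller still needs to send the enabling command.
 */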
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
				       int mac80211_queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
		WARN(mac80211_queue >=
		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
		     mac80211_queue, queue, sta_id, tid);
		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	}

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue, mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	spin_unlock_bh(&mvm->queue_info_lock);

	return enable_queue;
}

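/*
 * Enable a queue on the DQA TX path: update the local mapping and, for
 * the first TID on the queue, configure it in the transport and the
 * firmware scheduler via SCD_QUEUE_CFG. Returns true if the transport
 * moved the write pointer past @ssn, in which case the caller must
 * advance its sequence number accordingly.
 */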
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
					cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

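/*
 * Allocate a TX queue for a station/TID on the DQA path: try a MGMT
 * queue for non-QoS/management frames, then the station's reserved
 * queue, then any free DATA queue, and finally fall back to sharing an
 * existing queue. An inactive queue found along the way is freed first
 * so it can be reused.
 */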
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

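/*
 * A shared queue's "owner" TID was removed: hand firmware ownership of
 * the queue to one of the TIDs still mapped to it, using an
 * SCD_CFG_UPDATE_QUEUE_TID command.
 */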
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

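/*
 * A formerly shared queue is left with a single TID: redirect it back
 * to that TID's AC and, if an aggregation session is open, re-enable
 * aggregation and mark the queue as non-shared again.
 */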
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue TIDs are inactive - unmap them from the queue.
 */
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - mark queue as inactive. */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;

		for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
			mvmsta->tid_data[tid].is_tid_active = false;

		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
				    queue);
		return;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
		mvmsta->tid_data[tid].is_tid_active = false;

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvm->hw_queue_to_mac80211[queue] |=
			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
	}

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}
}

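/*
 * Periodic inactivity check over all DQA queues: TIDs with no recent
 * traffic are unmapped, fully idle queues are marked INACTIVE (so they
 * can be reused later without being torn down immediately), and queues
 * needing unsharing or a TID-ownership change are collected in bitmaps
 * and reconfigured once the locks are dropped.
 */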
1366static void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
1367{
Johannes Berg99448a82018-07-04 11:38:34 +02001368 unsigned long now = jiffies;
Johannes Berg90d2d942018-07-04 21:57:58 +02001369 unsigned long unshare_queues = 0;
Johannes Bergb3a87f12018-07-04 22:13:18 +02001370 unsigned long changetid_queues = 0;
Johannes Berg99448a82018-07-04 11:38:34 +02001371 int i;
1372
Johannes Bergdf2a22452018-07-04 16:21:03 +02001373 lockdep_assert_held(&mvm->mutex);
1374
Johannes Berg99448a82018-07-04 11:38:34 +02001375 if (iwl_mvm_has_new_tx_api(mvm))
1376 return;
1377
1378 spin_lock_bh(&mvm->queue_info_lock);
Johannes Berg99448a82018-07-04 11:38:34 +02001379
1380 rcu_read_lock();
1381
Johannes Berg459ab042018-07-04 13:06:53 +02001382 /* we skip the CMD queue below by starting at 1 */
1383 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1384
Johannes Berg99448a82018-07-04 11:38:34 +02001385 /*
1386 * If a queue times out - mark it as INACTIVE (don't remove right away
1387 * if we don't have to.) This is an optimization in case traffic comes
1388 * later, and we don't HAVE to use a currently-inactive queue
1389 */
Johannes Berg459ab042018-07-04 13:06:53 +02001390 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
Johannes Berg99448a82018-07-04 11:38:34 +02001391 struct ieee80211_sta *sta;
1392 struct iwl_mvm_sta *mvmsta;
1393 u8 sta_id;
1394 int tid;
1395 unsigned long inactive_tid_bitmap = 0;
1396 unsigned long queue_tid_bitmap;
1397
Johannes Berg99448a82018-07-04 11:38:34 +02001398 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
Johannes Berg459ab042018-07-04 13:06:53 +02001399 if (!queue_tid_bitmap)
1400 continue;
Johannes Berg99448a82018-07-04 11:38:34 +02001401
1402 /* If TXQ isn't in active use anyway - nothing to do here... */
1403 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
Johannes Berg459ab042018-07-04 13:06:53 +02001404 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
Johannes Berg99448a82018-07-04 11:38:34 +02001405 continue;
Johannes Berg99448a82018-07-04 11:38:34 +02001406
1407 /* Check to see if there are inactive TIDs on this queue */
1408 for_each_set_bit(tid, &queue_tid_bitmap,
1409 IWL_MAX_TID_COUNT + 1) {
1410 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1411 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1412 continue;
1413
1414 inactive_tid_bitmap |= BIT(tid);
1415 }
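		/*
		 * A TID is considered inactive only once its last frame is
		 * older than IWL_MVM_DQA_QUEUE_TIMEOUT; e.g. with a 5 s
		 * timeout (illustrative value), a TID served 2 s ago still
		 * passes the time_after() check above and stays active.
		 */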
Johannes Berg99448a82018-07-04 11:38:34 +02001416
1417 /* If all TIDs are active - finish check on this queue */
1418 if (!inactive_tid_bitmap)
1419 continue;
1420
1421 /*
 1422	 * If we got here - the queue hasn't been served recently and is
 1423	 * in use
1424 */
1425
1426 sta_id = mvm->queue_info[i].ra_sta_id;
1427 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1428
1429 /*
1430 * If the STA doesn't exist anymore, it isn't an error. It could
1431 * be that it was removed since getting the queues, and in this
1432 * case it should've inactivated its queues anyway.
1433 */
1434 if (IS_ERR_OR_NULL(sta))
1435 continue;
1436
1437 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1438
Johannes Berg459ab042018-07-04 13:06:53 +02001439 /* this isn't so nice, but works OK due to the way we loop */
1440 spin_unlock(&mvm->queue_info_lock);
1441
1442 /* and we need this locking order */
1443 spin_lock(&mvmsta->lock);
Johannes Berg99448a82018-07-04 11:38:34 +02001444 spin_lock(&mvm->queue_info_lock);
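		/*
		 * The lock order is presumably mvmsta->lock before
		 * queue_info_lock (to match other call paths), which is why
		 * the queue info lock is dropped and re-taken above instead
		 * of taking the sta lock while still holding it.
		 */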
1445 iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
Johannes Berg90d2d942018-07-04 21:57:58 +02001446 inactive_tid_bitmap,
Johannes Bergb3a87f12018-07-04 22:13:18 +02001447 &unshare_queues,
1448 &changetid_queues);
Johannes Berg459ab042018-07-04 13:06:53 +02001449 /* only unlock sta lock - we still need the queue info lock */
1450 spin_unlock(&mvmsta->lock);
Johannes Berg99448a82018-07-04 11:38:34 +02001451 }
1452
1453 rcu_read_unlock();
Johannes Berg459ab042018-07-04 13:06:53 +02001454 spin_unlock_bh(&mvm->queue_info_lock);
Johannes Bergdf2a22452018-07-04 16:21:03 +02001455
 1456	/* Reconfigure queues requiring reconfiguration */
Johannes Berg90d2d942018-07-04 21:57:58 +02001457 for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1458 iwl_mvm_unshare_queue(mvm, i);
Johannes Bergb3a87f12018-07-04 22:13:18 +02001459 for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1460 iwl_mvm_change_queue_tid(mvm, i);
Johannes Berg99448a82018-07-04 11:38:34 +02001461}
1462
Liad Kaufman24afba72015-07-28 18:56:08 +03001463static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1464{
1465 if (tid == IWL_MAX_TID_COUNT)
1466 return IEEE80211_AC_VO; /* MGMT */
1467
1468 return tid_to_mac80211_ac[tid];
1469}
1470
1471static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1472 struct ieee80211_sta *sta, int tid)
1473{
1474 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1475 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1476 struct sk_buff *skb;
1477 struct ieee80211_hdr *hdr;
1478 struct sk_buff_head deferred_tx;
1479 u8 mac_queue;
1480 bool no_queue = false; /* Marks if there is a problem with the queue */
1481 u8 ac;
1482
1483 lockdep_assert_held(&mvm->mutex);
1484
1485 skb = skb_peek(&tid_data->deferred_tx_frames);
1486 if (!skb)
1487 return;
1488 hdr = (void *)skb->data;
1489
1490 ac = iwl_mvm_tid_to_ac_queue(tid);
1491 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1492
Sara Sharon6862fce2017-02-22 19:34:17 +02001493 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
Liad Kaufman24afba72015-07-28 18:56:08 +03001494 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1495 IWL_ERR(mvm,
1496 "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1497 mvmsta->sta_id, tid);
1498
1499 /*
1500 * Mark queue as problematic so later the deferred traffic is
1501 * freed, as we can do nothing with it
1502 */
1503 no_queue = true;
1504 }
1505
1506 __skb_queue_head_init(&deferred_tx);
1507
Liad Kaufmand2515a92016-03-23 16:31:08 +02001508 /* Disable bottom-halves when entering TX path */
1509 local_bh_disable();
Liad Kaufman24afba72015-07-28 18:56:08 +03001510 spin_lock(&mvmsta->lock);
1511 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
Liad Kaufmanad5de732016-09-27 16:01:10 +03001512 mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
Liad Kaufman24afba72015-07-28 18:56:08 +03001513 spin_unlock(&mvmsta->lock);
1514
Liad Kaufman24afba72015-07-28 18:56:08 +03001515 while ((skb = __skb_dequeue(&deferred_tx)))
1516 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1517 ieee80211_free_txskb(mvm->hw, skb);
1518 local_bh_enable();
1519
1520 /* Wake queue */
1521 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1522}
1523
1524void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1525{
1526 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1527 add_stream_wk);
1528 struct ieee80211_sta *sta;
1529 struct iwl_mvm_sta *mvmsta;
1530 unsigned long deferred_tid_traffic;
Johannes Bergb3422282018-07-04 16:11:14 +02001531 int sta_id, tid;
Liad Kaufman24afba72015-07-28 18:56:08 +03001532
1533 mutex_lock(&mvm->mutex);
1534
Johannes Bergdf2a22452018-07-04 16:21:03 +02001535 iwl_mvm_inactivity_check(mvm);
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02001536
Liad Kaufman24afba72015-07-28 18:56:08 +03001537 /* Go over all stations with deferred traffic */
1538 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1539 IWL_MVM_STATION_COUNT) {
1540 clear_bit(sta_id, mvm->sta_deferred_frames);
1541 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1542 lockdep_is_held(&mvm->mutex));
1543 if (IS_ERR_OR_NULL(sta))
1544 continue;
1545
1546 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1547 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1548
1549 for_each_set_bit(tid, &deferred_tid_traffic,
1550 IWL_MAX_TID_COUNT + 1)
1551 iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1552 }
1553
1554 mutex_unlock(&mvm->mutex);
1555}
1556
1557static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001558 struct ieee80211_sta *sta,
1559 enum nl80211_iftype vif_type)
Liad Kaufman24afba72015-07-28 18:56:08 +03001560{
1561 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1562 int queue;
Sara Sharon01796ff2016-11-16 17:04:36 +02001563 bool using_inactive_queue = false, same_sta = false;
Liad Kaufman24afba72015-07-28 18:56:08 +03001564
Sara Sharon396952e2017-02-22 19:40:55 +02001565 /* queue reserving is disabled on new TX path */
1566 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1567 return 0;
1568
Liad Kaufman9794c642015-08-19 17:34:28 +03001569 /*
1570 * Check for inactive queues, so we don't reach a situation where we
1571 * can't add a STA due to a shortage in queues that doesn't really exist
1572 */
1573 iwl_mvm_inactivity_check(mvm);
1574
Liad Kaufman24afba72015-07-28 18:56:08 +03001575 spin_lock_bh(&mvm->queue_info_lock);
1576
1577 /* Make sure we have free resources for this STA */
Liad Kaufmand5216a22015-08-09 15:50:51 +03001578 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
Johannes Berg1c140892018-07-04 11:58:28 +02001579 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
Liad Kaufmancf961e12015-08-13 19:16:08 +03001580 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1581 IWL_MVM_QUEUE_FREE))
Liad Kaufmand5216a22015-08-09 15:50:51 +03001582 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1583 else
Liad Kaufman9794c642015-08-19 17:34:28 +03001584 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1585 IWL_MVM_DQA_MIN_DATA_QUEUE,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001586 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufman24afba72015-07-28 18:56:08 +03001587 if (queue < 0) {
1588 spin_unlock_bh(&mvm->queue_info_lock);
1589 IWL_ERR(mvm, "No available queues for new station\n");
1590 return -ENOSPC;
Sara Sharon01796ff2016-11-16 17:04:36 +02001591 } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
1592 /*
1593 * If this queue is already allocated but inactive we'll need to
1594 * first free this queue before enabling it again, we'll mark
1595 * it as reserved to make sure no new traffic arrives on it
1596 */
1597 using_inactive_queue = true;
1598 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
Liad Kaufman24afba72015-07-28 18:56:08 +03001599 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03001600 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman24afba72015-07-28 18:56:08 +03001601
1602 spin_unlock_bh(&mvm->queue_info_lock);
1603
1604 mvmsta->reserved_queue = queue;
1605
Sara Sharon01796ff2016-11-16 17:04:36 +02001606 if (using_inactive_queue)
1607 iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
1608
Liad Kaufman24afba72015-07-28 18:56:08 +03001609 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1610 queue, mvmsta->sta_id);
1611
1612 return 0;
1613}
1614
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001615/*
1616 * In DQA mode, after a HW restart the queues should be allocated as before, in
1617 * order to avoid race conditions when there are shared queues. This function
1618 * does the re-mapping and queue allocation.
1619 *
1620 * Note that re-enabling aggregations isn't done in this function.
1621 */
1622static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1623 struct iwl_mvm_sta *mvm_sta)
1624{
1625 unsigned int wdg_timeout =
1626 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1627 int i;
1628 struct iwl_trans_txq_scd_cfg cfg = {
1629 .sta_id = mvm_sta->sta_id,
1630 .frame_limit = IWL_FRAME_LIMIT,
1631 };
1632
Johannes Berg03c902b2016-12-02 12:03:36 +01001633 /* Make sure reserved queue is still marked as such (if allocated) */
1634 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1635 mvm->queue_info[mvm_sta->reserved_queue].status =
1636 IWL_MVM_QUEUE_RESERVED;
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001637
1638 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1639 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1640 int txq_id = tid_data->txq_id;
1641 int ac;
1642 u8 mac_queue;
1643
Sara Sharon6862fce2017-02-22 19:34:17 +02001644 if (txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001645 continue;
1646
1647 skb_queue_head_init(&tid_data->deferred_tx_frames);
1648
1649 ac = tid_to_mac80211_ac[i];
1650 mac_queue = mvm_sta->vif->hw_queue[ac];
1651
Sara Sharon310181e2017-01-17 14:27:48 +02001652 if (iwl_mvm_has_new_tx_api(mvm)) {
1653 IWL_DEBUG_TX_QUEUES(mvm,
1654 "Re-mapping sta %d tid %d\n",
1655 mvm_sta->sta_id, i);
1656 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1657 mvm_sta->sta_id,
1658 i, wdg_timeout);
1659 tid_data->txq_id = txq_id;
Liad Kaufman5d390512017-10-17 16:26:00 +03001660
1661 /*
1662 * Since we don't set the seq number after reset, and HW
1663 * sets it now, FW reset will cause the seq num to start
 1664			 * at 0 again, so the driver will need to update it
 1665			 * internally as well, to keep it in sync with the real value
1666 */
1667 tid_data->seq_number = 0;
Sara Sharon310181e2017-01-17 14:27:48 +02001668 } else {
1669 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001670
Sara Sharon310181e2017-01-17 14:27:48 +02001671 cfg.tid = i;
Emmanuel Grumbachcf6c6ea2017-06-13 13:18:48 +03001672 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
Sara Sharon310181e2017-01-17 14:27:48 +02001673 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1674 txq_id ==
1675 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001676
Sara Sharon310181e2017-01-17 14:27:48 +02001677 IWL_DEBUG_TX_QUEUES(mvm,
1678 "Re-mapping sta %d tid %d to queue %d\n",
1679 mvm_sta->sta_id, i, txq_id);
1680
1681 iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1682 wdg_timeout);
Sara Sharon34e10862017-02-23 13:15:07 +02001683 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
Sara Sharon310181e2017-01-17 14:27:48 +02001684 }
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001685 }
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001686}
1687
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001688static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1689 struct iwl_mvm_int_sta *sta,
1690 const u8 *addr,
1691 u16 mac_id, u16 color)
1692{
1693 struct iwl_mvm_add_sta_cmd cmd;
1694 int ret;
Luca Coelho3f497de2017-09-02 11:05:22 +03001695 u32 status = ADD_STA_SUCCESS;
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001696
1697 lockdep_assert_held(&mvm->mutex);
1698
1699 memset(&cmd, 0, sizeof(cmd));
1700 cmd.sta_id = sta->sta_id;
1701 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1702 color));
1703 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1704 cmd.station_type = sta->type;
1705
1706 if (!iwl_mvm_has_new_tx_api(mvm))
1707 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1708 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1709
1710 if (addr)
1711 memcpy(cmd.addr, addr, ETH_ALEN);
1712
1713 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1714 iwl_mvm_add_sta_cmd_size(mvm),
1715 &cmd, &status);
1716 if (ret)
1717 return ret;
1718
1719 switch (status & IWL_ADD_STA_STATUS_MASK) {
1720 case ADD_STA_SUCCESS:
1721 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1722 return 0;
1723 default:
1724 ret = -EIO;
1725 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1726 status);
1727 break;
1728 }
1729 return ret;
1730}
1731
Johannes Berg8ca151b2013-01-24 14:25:36 +01001732int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1733 struct ieee80211_vif *vif,
1734 struct ieee80211_sta *sta)
1735{
1736 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001737 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharona571f5f2015-12-07 12:50:58 +02001738 struct iwl_mvm_rxq_dup_data *dup_data;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001739 int i, ret, sta_id;
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001740 bool sta_update = false;
1741 unsigned int sta_flags = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001742
1743 lockdep_assert_held(&mvm->mutex);
1744
1745 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
Eliad Pellerb92e6612014-01-23 17:58:23 +02001746 sta_id = iwl_mvm_find_free_sta_id(mvm,
1747 ieee80211_vif_type_p2p(vif));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001748 else
1749 sta_id = mvm_sta->sta_id;
1750
Sara Sharon0ae98812017-01-04 14:53:58 +02001751 if (sta_id == IWL_MVM_INVALID_STA)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001752 return -ENOSPC;
1753
1754 spin_lock_init(&mvm_sta->lock);
1755
Johannes Bergc8f54702017-06-19 23:50:31 +02001756 /* if this is a HW restart re-alloc existing queues */
1757 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001758 struct iwl_mvm_int_sta tmp_sta = {
1759 .sta_id = sta_id,
1760 .type = mvm_sta->sta_type,
1761 };
1762
1763 /*
1764 * First add an empty station since allocating
1765 * a queue requires a valid station
1766 */
1767 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1768 mvmvif->id, mvmvif->color);
1769 if (ret)
1770 goto err;
1771
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001772 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001773 sta_update = true;
1774 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001775 goto update_fw;
1776 }
1777
Johannes Berg8ca151b2013-01-24 14:25:36 +01001778 mvm_sta->sta_id = sta_id;
1779 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1780 mvmvif->color);
1781 mvm_sta->vif = vif;
Liad Kaufmana58bb462017-05-28 14:20:04 +03001782 if (!mvm->trans->cfg->gen2)
1783 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1784 else
1785 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03001786 mvm_sta->tx_protection = 0;
1787 mvm_sta->tt_tx_protection = false;
Sara Sharonced19f22017-02-06 19:09:32 +02001788 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001789
1790 /* HW restart, don't assume the memory has been zeroed */
Liad Kaufman69191af2015-09-01 18:50:22 +03001791 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
Johannes Berg8ca151b2013-01-24 14:25:36 +01001792 mvm_sta->tfd_queue_msk = 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001793
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001794 /* for HW restart - reset everything but the sequence number */
Liad Kaufman24afba72015-07-28 18:56:08 +03001795 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001796 u16 seq = mvm_sta->tid_data[i].seq_number;
1797 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1798 mvm_sta->tid_data[i].seq_number = seq;
Liad Kaufman24afba72015-07-28 18:56:08 +03001799
Liad Kaufman24afba72015-07-28 18:56:08 +03001800 /*
1801 * Mark all queues for this STA as unallocated and defer TX
1802 * frames until the queue is allocated
1803 */
Sara Sharon6862fce2017-02-22 19:34:17 +02001804 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001805 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
Johannes Berg6d9d32b2013-08-06 18:58:56 +02001806 }
Liad Kaufman24afba72015-07-28 18:56:08 +03001807 mvm_sta->deferred_traffic_tid_map = 0;
Eyal Shapiraefed6642014-09-14 15:58:53 +03001808 mvm_sta->agg_tids = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001809
Sara Sharona571f5f2015-12-07 12:50:58 +02001810 if (iwl_mvm_has_new_rx_api(mvm) &&
1811 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Johannes Berg92c4dca2017-06-07 10:35:54 +02001812 int q;
1813
Sara Sharona571f5f2015-12-07 12:50:58 +02001814 dup_data = kcalloc(mvm->trans->num_rx_queues,
Johannes Berg92c4dca2017-06-07 10:35:54 +02001815 sizeof(*dup_data), GFP_KERNEL);
Sara Sharona571f5f2015-12-07 12:50:58 +02001816 if (!dup_data)
1817 return -ENOMEM;
Johannes Berg92c4dca2017-06-07 10:35:54 +02001818 /*
1819 * Initialize all the last_seq values to 0xffff which can never
1820 * compare equal to the frame's seq_ctrl in the check in
1821 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1822 * number and fragmented packets don't reach that function.
1823 *
1824 * This thus allows receiving a packet with seqno 0 and the
1825 * retry bit set as the very first packet on a new TID.
1826 */
1827 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1828 memset(dup_data[q].last_seq, 0xff,
1829 sizeof(dup_data[q].last_seq));
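		/*
		 * E.g. a first frame with seq_ctrl 0x0010 (seqno 1,
		 * fragment 0) can never equal the 0xffff sentinel, since a
		 * seq_ctrl of 0xffff would imply fragment number 0xf and
		 * fragmented frames never reach iwl_mvm_is_dup().
		 */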
Sara Sharona571f5f2015-12-07 12:50:58 +02001830 mvm_sta->dup_data = dup_data;
1831 }
1832
Johannes Bergc8f54702017-06-19 23:50:31 +02001833 if (!iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufmand5216a22015-08-09 15:50:51 +03001834 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1835 ieee80211_vif_type_p2p(vif));
Liad Kaufman24afba72015-07-28 18:56:08 +03001836 if (ret)
1837 goto err;
1838 }
1839
Gregory Greenman9f66a392017-11-05 18:49:48 +02001840 /*
1841 * if rs is registered with mac80211, then "add station" will be handled
 1842	 * via the corresponding ops; otherwise we need to notify rate scaling here
1843 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02001844 if (iwl_mvm_has_tlc_offload(mvm))
Gregory Greenman9f66a392017-11-05 18:49:48 +02001845 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1846
Liad Kaufman8d98ae62016-02-02 16:02:46 +02001847update_fw:
Shaul Triebitz732d06e2017-07-10 19:58:10 +03001848 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001849 if (ret)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001850 goto err;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001851
Johannes Berg9e848012014-08-04 14:33:42 +02001852 if (vif->type == NL80211_IFTYPE_STATION) {
1853 if (!sta->tdls) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001854 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001855 mvmvif->ap_sta_id = sta_id;
1856 } else {
Sara Sharon0ae98812017-01-04 14:53:58 +02001857 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
Johannes Berg9e848012014-08-04 14:33:42 +02001858 }
1859 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001860
1861 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1862
1863 return 0;
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001864
1865err:
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001866 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001867}
1868
1869int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1870 bool drain)
1871{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001872 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001873 int ret;
1874 u32 status;
1875
1876 lockdep_assert_held(&mvm->mutex);
1877
1878 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1879 cmd.sta_id = mvmsta->sta_id;
1880 cmd.add_modify = STA_MODE_MODIFY;
1881 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1882 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1883
1884 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001885 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1886 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001887 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001888 if (ret)
1889 return ret;
1890
Sara Sharon837c4da2016-01-07 16:50:45 +02001891 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001892 case ADD_STA_SUCCESS:
1893 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
1894 mvmsta->sta_id);
1895 break;
1896 default:
1897 ret = -EIO;
1898 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1899 mvmsta->sta_id);
1900 break;
1901 }
1902
1903 return ret;
1904}
1905
1906/*
1907 * Remove a station from the FW table. Before sending the command to remove
1908 * the station validate that the station is indeed known to the driver (sanity
1909 * only).
1910 */
1911static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1912{
1913 struct ieee80211_sta *sta;
1914 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1915 .sta_id = sta_id,
1916 };
1917 int ret;
1918
1919 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1920 lockdep_is_held(&mvm->mutex));
1921
1922 /* Note: internal stations are marked as error values */
1923 if (!sta) {
1924 IWL_ERR(mvm, "Invalid station id\n");
1925 return -EINVAL;
1926 }
1927
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001928 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001929 sizeof(rm_sta_cmd), &rm_sta_cmd);
1930 if (ret) {
1931 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1932 return ret;
1933 }
1934
1935 return 0;
1936}
1937
Liad Kaufman24afba72015-07-28 18:56:08 +03001938static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1939 struct ieee80211_vif *vif,
1940 struct iwl_mvm_sta *mvm_sta)
1941{
1942 int ac;
1943 int i;
1944
1945 lockdep_assert_held(&mvm->mutex);
1946
1947 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
Sara Sharon6862fce2017-02-22 19:34:17 +02001948 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
Liad Kaufman24afba72015-07-28 18:56:08 +03001949 continue;
1950
1951 ac = iwl_mvm_tid_to_ac_queue(i);
1952 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1953 vif->hw_queue[ac], i, 0);
Sara Sharon6862fce2017-02-22 19:34:17 +02001954 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
Liad Kaufman24afba72015-07-28 18:56:08 +03001955 }
1956}
1957
Sara Sharond6d517b2017-03-06 10:16:11 +02001958int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1959 struct iwl_mvm_sta *mvm_sta)
1960{
Sharon Dvirbec95222017-06-12 11:40:33 +03001961 int i;
Sara Sharond6d517b2017-03-06 10:16:11 +02001962
1963 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1964 u16 txq_id;
Sharon Dvirbec95222017-06-12 11:40:33 +03001965 int ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001966
1967 spin_lock_bh(&mvm_sta->lock);
1968 txq_id = mvm_sta->tid_data[i].txq_id;
1969 spin_unlock_bh(&mvm_sta->lock);
1970
1971 if (txq_id == IWL_MVM_INVALID_QUEUE)
1972 continue;
1973
1974 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1975 if (ret)
Sharon Dvirbec95222017-06-12 11:40:33 +03001976 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001977 }
1978
Sharon Dvirbec95222017-06-12 11:40:33 +03001979 return 0;
Sara Sharond6d517b2017-03-06 10:16:11 +02001980}
1981
Johannes Berg8ca151b2013-01-24 14:25:36 +01001982int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1983 struct ieee80211_vif *vif,
1984 struct ieee80211_sta *sta)
1985{
1986 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001987 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001988 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001989 int ret;
1990
1991 lockdep_assert_held(&mvm->mutex);
1992
Sara Sharona571f5f2015-12-07 12:50:58 +02001993 if (iwl_mvm_has_new_rx_api(mvm))
1994 kfree(mvm_sta->dup_data);
1995
Johannes Bergc8f54702017-06-19 23:50:31 +02001996 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1997 if (ret)
1998 return ret;
Sara Sharond6d517b2017-03-06 10:16:11 +02001999
Johannes Bergc8f54702017-06-19 23:50:31 +02002000 /* flush its queues here since we are freeing mvm_sta */
2001 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
2002 if (ret)
2003 return ret;
2004 if (iwl_mvm_has_new_tx_api(mvm)) {
2005 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
2006 } else {
2007 u32 q_mask = mvm_sta->tfd_queue_msk;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02002008
Johannes Bergc8f54702017-06-19 23:50:31 +02002009 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2010 q_mask);
2011 }
2012 if (ret)
2013 return ret;
Liad Kaufman56214742016-09-22 15:14:08 +03002014
Johannes Bergc8f54702017-06-19 23:50:31 +02002015 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002016
Johannes Bergc8f54702017-06-19 23:50:31 +02002017 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002018
Johannes Bergc8f54702017-06-19 23:50:31 +02002019 /* If there is a TXQ still marked as reserved - free it */
2020 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
2021 u8 reserved_txq = mvm_sta->reserved_queue;
2022 enum iwl_mvm_queue_status *status;
2023
2024 /*
2025 * If no traffic has gone through the reserved TXQ - it
2026 * is still marked as IWL_MVM_QUEUE_RESERVED, and
2027 * should be manually marked as free again
2028 */
2029 spin_lock_bh(&mvm->queue_info_lock);
2030 status = &mvm->queue_info[reserved_txq].status;
2031 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
2032 (*status != IWL_MVM_QUEUE_FREE),
2033 "sta_id %d reserved txq %d status %d",
2034 sta_id, reserved_txq, *status)) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002035 spin_unlock_bh(&mvm->queue_info_lock);
Johannes Bergc8f54702017-06-19 23:50:31 +02002036 return -EINVAL;
Liad Kaufmana0315dea2016-07-07 13:25:59 +03002037 }
2038
Johannes Bergc8f54702017-06-19 23:50:31 +02002039 *status = IWL_MVM_QUEUE_FREE;
2040 spin_unlock_bh(&mvm->queue_info_lock);
2041 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002042
Johannes Bergc8f54702017-06-19 23:50:31 +02002043 if (vif->type == NL80211_IFTYPE_STATION &&
2044 mvmvif->ap_sta_id == sta_id) {
2045 /* if associated - we can't remove the AP STA now */
2046 if (vif->bss_conf.assoc)
2047 return ret;
Eliad Peller37577fe2013-12-05 17:19:39 +02002048
Johannes Bergc8f54702017-06-19 23:50:31 +02002049 /* unassoc - go ahead - remove the AP STA now */
2050 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
2051
2052 /* clear d0i3_ap_sta_id if no longer relevant */
2053 if (mvm->d0i3_ap_sta_id == sta_id)
2054 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002055 }
2056
2057 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03002058 * This shouldn't happen - the TDLS channel switch should be canceled
2059 * before the STA is removed.
2060 */
Sara Sharon94c3e612016-12-07 15:04:37 +02002061 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
Sara Sharon0ae98812017-01-04 14:53:58 +02002062 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03002063 cancel_delayed_work(&mvm->tdls_cs.dwork);
2064 }
2065
2066 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03002067 * Make sure that the tx response code sees the station as -EBUSY and
2068 * calls the drain worker.
2069 */
2070 spin_lock_bh(&mvm_sta->lock);
Johannes Bergc8f54702017-06-19 23:50:31 +02002071 spin_unlock_bh(&mvm_sta->lock);
Sara Sharon94c3e612016-12-07 15:04:37 +02002072
Johannes Bergc8f54702017-06-19 23:50:31 +02002073 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
2074 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002075
2076 return ret;
2077}
2078
2079int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
2080 struct ieee80211_vif *vif,
2081 u8 sta_id)
2082{
2083 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
2084
2085 lockdep_assert_held(&mvm->mutex);
2086
Monam Agarwalc531c772014-03-24 00:05:56 +05302087 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002088 return ret;
2089}
2090
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002091int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2092 struct iwl_mvm_int_sta *sta,
Sara Sharonced19f22017-02-06 19:09:32 +02002093 u32 qmask, enum nl80211_iftype iftype,
2094 enum iwl_sta_type type)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002095{
Avraham Sterndf65c8d2018-03-06 14:10:49 +02002096 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2097 sta->sta_id == IWL_MVM_INVALID_STA) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02002098 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Sara Sharon0ae98812017-01-04 14:53:58 +02002099 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002100 return -ENOSPC;
2101 }
2102
2103 sta->tfd_queue_msk = qmask;
Sara Sharonced19f22017-02-06 19:09:32 +02002104 sta->type = type;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002105
2106 /* put a non-NULL value so iterating over the stations won't stop */
2107 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
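	/*
	 * Readers must therefore filter internal stations with
	 * IS_ERR_OR_NULL() rather than a plain NULL check - see e.g. the
	 * IS_ERR_OR_NULL() test in iwl_mvm_inactivity_check() above.
	 */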
2108 return 0;
2109}
2110
Sara Sharon26d6c162017-01-03 12:00:19 +02002111void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002112{
Monam Agarwalc531c772014-03-24 00:05:56 +05302113 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002114 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
Sara Sharon0ae98812017-01-04 14:53:58 +02002115 sta->sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002116}
2117
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002118static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
2119 u8 sta_id, u8 fifo)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002120{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02002121 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
2122 mvm->cfg->base_params->wd_timeout :
2123 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002124
Sara Sharon310181e2017-01-17 14:27:48 +02002125 if (iwl_mvm_has_new_tx_api(mvm)) {
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002126 int tvqm_queue =
2127 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
2128 IWL_MAX_TID_COUNT,
2129 wdg_timeout);
2130 *queue = tvqm_queue;
Johannes Bergc8f54702017-06-19 23:50:31 +02002131 } else {
Liad Kaufman28d07932015-09-01 16:36:25 +03002132 struct iwl_trans_txq_scd_cfg cfg = {
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002133 .fifo = fifo,
2134 .sta_id = sta_id,
Liad Kaufman28d07932015-09-01 16:36:25 +03002135 .tid = IWL_MAX_TID_COUNT,
2136 .aggregate = false,
2137 .frame_limit = IWL_FRAME_LIMIT,
2138 };
2139
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002140 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
Liad Kaufman28d07932015-09-01 16:36:25 +03002141 }
Sara Sharonc5a719e2016-11-15 10:20:48 +02002142}
2143
2144int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
2145{
2146 int ret;
2147
2148 lockdep_assert_held(&mvm->mutex);
2149
 2150	/* Allocate the aux station and assign the aux queue to it */
2151 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
Sara Sharonced19f22017-02-06 19:09:32 +02002152 NL80211_IFTYPE_UNSPECIFIED,
2153 IWL_STA_AUX_ACTIVITY);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002154 if (ret)
2155 return ret;
2156
2157 /* Map Aux queue to fifo - needs to happen before adding Aux station */
2158 if (!iwl_mvm_has_new_tx_api(mvm))
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002159 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2160 mvm->aux_sta.sta_id,
2161 IWL_MVM_TX_FIFO_MCAST);
Liad Kaufman28d07932015-09-01 16:36:25 +03002162
Johannes Berg8ca151b2013-01-24 14:25:36 +01002163 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
2164 MAC_INDEX_AUX, 0);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002165 if (ret) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002166 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002167 return ret;
2168 }
2169
2170 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002171	 * For 22000 firmware and on we cannot add a queue to a station unknown
 2172	 * to the firmware, so enable the queue here - after the station was added
2173 */
2174 if (iwl_mvm_has_new_tx_api(mvm))
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002175 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2176 mvm->aux_sta.sta_id,
2177 IWL_MVM_TX_FIFO_MCAST);
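	/*
	 * Either way the ordering constraint holds: on the old TX API the
	 * queue is mapped before ADD_STA, while on the new (TVQM) API it
	 * can only be allocated once the station exists in the firmware.
	 */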
Sara Sharonc5a719e2016-11-15 10:20:48 +02002178
2179 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002180}
2181
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002182int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2183{
2184 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002185 int ret;
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002186
2187 lockdep_assert_held(&mvm->mutex);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002188
2189 /* Map snif queue to fifo - must happen before adding snif station */
2190 if (!iwl_mvm_has_new_tx_api(mvm))
2191 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2192 mvm->snif_sta.sta_id,
2193 IWL_MVM_TX_FIFO_BE);
2194
2195 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002196 mvmvif->id, 0);
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002197 if (ret)
2198 return ret;
2199
2200 /*
 2201	 * For 22000 firmware and on we cannot add a queue to a station unknown
 2202	 * to the firmware, so enable the queue here - after the station was added
2203 */
2204 if (iwl_mvm_has_new_tx_api(mvm))
2205 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2206 mvm->snif_sta.sta_id,
2207 IWL_MVM_TX_FIFO_BE);
2208
2209 return 0;
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002210}
2211
2212int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2213{
2214 int ret;
2215
2216 lockdep_assert_held(&mvm->mutex);
2217
Emmanuel Grumbachb13f43a2017-11-19 10:35:14 +02002218 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
2219 IWL_MAX_TID_COUNT, 0);
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02002220 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2221 if (ret)
2222 IWL_WARN(mvm, "Failed sending remove station\n");
2223
2224 return ret;
2225}
2226
2227void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2228{
2229 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2230}
2231
Johannes Berg712b24a2014-08-04 14:14:14 +02002232void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
2233{
2234 lockdep_assert_held(&mvm->mutex);
2235
2236 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2237}
2238
Johannes Berg8ca151b2013-01-24 14:25:36 +01002239/*
2240 * Send the add station command for the vif's broadcast station.
2241 * Assumes that the station was already allocated.
2242 *
2243 * @mvm: the mvm component
2244 * @vif: the interface to which the broadcast station is added
 2245 * (the broadcast station used is the vif's mvmvif->bcast_sta)
2246 */
Johannes Berg013290a2014-08-04 13:38:48 +02002247int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002248{
2249 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002250 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02002251 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01002252 const u8 *baddr = _baddr;
Johannes Berg7daa7622017-02-24 12:02:22 +01002253 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002254 int ret;
Sara Sharonc5a719e2016-11-15 10:20:48 +02002255 unsigned int wdg_timeout =
2256 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2257 struct iwl_trans_txq_scd_cfg cfg = {
2258 .fifo = IWL_MVM_TX_FIFO_VO,
2259 .sta_id = mvmvif->bcast_sta.sta_id,
2260 .tid = IWL_MAX_TID_COUNT,
2261 .aggregate = false,
2262 .frame_limit = IWL_FRAME_LIMIT,
2263 };
Johannes Berg8ca151b2013-01-24 14:25:36 +01002264
2265 lockdep_assert_held(&mvm->mutex);
2266
Johannes Bergc8f54702017-06-19 23:50:31 +02002267 if (!iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufman4d339982017-03-21 17:13:16 +02002268 if (vif->type == NL80211_IFTYPE_AP ||
2269 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon49f71712017-01-09 12:07:16 +02002270 queue = mvm->probe_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002271 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
Sara Sharon49f71712017-01-09 12:07:16 +02002272 queue = mvm->p2p_dev_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002273 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
Liad Kaufmande24f632015-08-04 15:19:18 +03002274 return -EINVAL;
2275
Liad Kaufmandf88c082016-11-24 15:31:00 +02002276 bsta->tfd_queue_msk |= BIT(queue);
Sara Sharonc5a719e2016-11-15 10:20:48 +02002277
Sara Sharon310181e2017-01-17 14:27:48 +02002278 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
2279 &cfg, wdg_timeout);
Liad Kaufmande24f632015-08-04 15:19:18 +03002280 }
2281
Johannes Berg5023d962013-07-31 14:07:43 +02002282 if (vif->type == NL80211_IFTYPE_ADHOC)
2283 baddr = vif->bss_conf.bssid;
2284
Sara Sharon0ae98812017-01-04 14:53:58 +02002285 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01002286 return -ENOSPC;
2287
Liad Kaufmandf88c082016-11-24 15:31:00 +02002288 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2289 mvmvif->id, mvmvif->color);
2290 if (ret)
2291 return ret;
2292
2293 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002294	 * For 22000 firmware and on we cannot add a queue to a station unknown
Sara Sharonc5a719e2016-11-15 10:20:48 +02002295	 * to the firmware, so enable the queue here - after the station was added
Liad Kaufmandf88c082016-11-24 15:31:00 +02002296 */
Sara Sharon310181e2017-01-17 14:27:48 +02002297 if (iwl_mvm_has_new_tx_api(mvm)) {
Johannes Berg7daa7622017-02-24 12:02:22 +01002298 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
2299 bsta->sta_id,
2300 IWL_MAX_TID_COUNT,
2301 wdg_timeout);
2302
Luca Coelho7b758a12017-06-20 13:40:03 +03002303 if (vif->type == NL80211_IFTYPE_AP ||
2304 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon310181e2017-01-17 14:27:48 +02002305 mvm->probe_queue = queue;
2306 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2307 mvm->p2p_dev_queue = queue;
Sara Sharon310181e2017-01-17 14:27:48 +02002308 }
Liad Kaufmandf88c082016-11-24 15:31:00 +02002309
2310 return 0;
2311}
2312
2313static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2314 struct ieee80211_vif *vif)
2315{
2316 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002317 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002318
2319 lockdep_assert_held(&mvm->mutex);
2320
Sara Sharond49394a2017-03-05 13:01:08 +02002321 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2322
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002323 switch (vif->type) {
2324 case NL80211_IFTYPE_AP:
2325 case NL80211_IFTYPE_ADHOC:
2326 queue = mvm->probe_queue;
2327 break;
2328 case NL80211_IFTYPE_P2P_DEVICE:
2329 queue = mvm->p2p_dev_queue;
2330 break;
2331 default:
2332 WARN(1, "Can't free bcast queue on vif type %d\n",
2333 vif->type);
2334 return;
Liad Kaufmandf88c082016-11-24 15:31:00 +02002335 }
2336
Mordechai Goodsteind167e812017-05-10 16:42:53 +03002337 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
2338 if (iwl_mvm_has_new_tx_api(mvm))
2339 return;
2340
2341 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2342 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002343}
2344
2345/* Send the FW a request to remove the station from it's internal data
2346 * structures, but DO NOT remove the entry from the local data structures. */
Johannes Berg013290a2014-08-04 13:38:48 +02002347int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002348{
Johannes Berg013290a2014-08-04 13:38:48 +02002349 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002350 int ret;
2351
2352 lockdep_assert_held(&mvm->mutex);
2353
Johannes Bergc8f54702017-06-19 23:50:31 +02002354 iwl_mvm_free_bcast_sta_queues(mvm, vif);
Liad Kaufmandf88c082016-11-24 15:31:00 +02002355
Johannes Berg013290a2014-08-04 13:38:48 +02002356 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002357 if (ret)
2358 IWL_WARN(mvm, "Failed sending remove station\n");
2359 return ret;
2360}
2361
Johannes Berg013290a2014-08-04 13:38:48 +02002362int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2363{
2364 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002365
2366 lockdep_assert_held(&mvm->mutex);
2367
Johannes Bergc8f54702017-06-19 23:50:31 +02002368 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
Sara Sharonced19f22017-02-06 19:09:32 +02002369 ieee80211_vif_type_p2p(vif),
2370 IWL_STA_GENERAL_PURPOSE);
Johannes Berg013290a2014-08-04 13:38:48 +02002371}
2372
Johannes Berg8ca151b2013-01-24 14:25:36 +01002373/* Allocate a new station entry for the broadcast station to the given vif,
2374 * and send it to the FW.
2375 * Note that each P2P mac should have its own broadcast station.
2376 *
2377 * @mvm: the mvm component
2378 * @vif: the interface to which the broadcast station is added
 2379 * (the broadcast station used is the vif's mvmvif->bcast_sta). */
Luca Coelhod1973582017-06-22 16:00:25 +03002380int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002381{
2382 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002383 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002384 int ret;
2385
2386 lockdep_assert_held(&mvm->mutex);
2387
Johannes Berg013290a2014-08-04 13:38:48 +02002388 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002389 if (ret)
2390 return ret;
2391
Johannes Berg013290a2014-08-04 13:38:48 +02002392 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002393
2394 if (ret)
2395 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02002396
Johannes Berg8ca151b2013-01-24 14:25:36 +01002397 return ret;
2398}
2399
Johannes Berg013290a2014-08-04 13:38:48 +02002400void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2401{
2402 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2403
2404 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2405}
2406
Johannes Berg8ca151b2013-01-24 14:25:36 +01002407/*
 2408 * Send the FW a request to remove the station from its internal data
2409 * structures, and in addition remove it from the local data structure.
2410 */
Luca Coelhod1973582017-06-22 16:00:25 +03002411int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002412{
2413 int ret;
2414
2415 lockdep_assert_held(&mvm->mutex);
2416
Johannes Berg013290a2014-08-04 13:38:48 +02002417 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002418
Johannes Berg013290a2014-08-04 13:38:48 +02002419 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2420
Johannes Berg8ca151b2013-01-24 14:25:36 +01002421 return ret;
2422}
2423
Sara Sharon26d6c162017-01-03 12:00:19 +02002424/*
2425 * Allocate a new station entry for the multicast station to the given vif,
2426 * and send it to the FW.
2427 * Note that each AP/GO mac should have its own multicast station.
2428 *
2429 * @mvm: the mvm component
2430 * @vif: the interface to which the multicast station is added
2431 */
2432int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2433{
2434 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2435 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2436 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2437 const u8 *maddr = _maddr;
2438 struct iwl_trans_txq_scd_cfg cfg = {
2439 .fifo = IWL_MVM_TX_FIFO_MCAST,
2440 .sta_id = msta->sta_id,
Ilan Peer6508de02018-01-25 15:22:41 +02002441 .tid = 0,
Sara Sharon26d6c162017-01-03 12:00:19 +02002442 .aggregate = false,
2443 .frame_limit = IWL_FRAME_LIMIT,
2444 };
2445 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2446 int ret;
2447
2448 lockdep_assert_held(&mvm->mutex);
2449
Liad Kaufmanee48b722017-03-21 17:13:16 +02002450 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2451 vif->type != NL80211_IFTYPE_ADHOC))
Sara Sharon26d6c162017-01-03 12:00:19 +02002452 return -ENOTSUPP;
2453
Sara Sharonced19f22017-02-06 19:09:32 +02002454 /*
Sara Sharonfc07bd82017-12-21 15:05:28 +02002455 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2456 * invalid, so make sure we use the queue we want.
2457 * Note that this is done here as we want to avoid making DQA
 2458	 * changes in the mac80211 layer.
2459 */
2460 if (vif->type == NL80211_IFTYPE_ADHOC) {
2461 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2462 mvmvif->cab_queue = vif->cab_queue;
2463 }
2464
2465 /*
Sara Sharonced19f22017-02-06 19:09:32 +02002466 * While in previous FWs we had to exclude cab queue from TFD queue
 2467	 * mask, now it is needed like any other queue.
2468 */
2469 if (!iwl_mvm_has_new_tx_api(mvm) &&
2470 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2471 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2472 &cfg, timeout);
2473 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2474 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002475 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2476 mvmvif->id, mvmvif->color);
2477 if (ret) {
2478 iwl_mvm_dealloc_int_sta(mvm, msta);
2479 return ret;
2480 }
2481
2482 /*
2483 * Enable cab queue after the ADD_STA command is sent.
Luca Coelho2f7a3862017-11-15 15:07:34 +02002484	 * This is needed for 22000 firmware which won't accept an SCD_QUEUE_CFG
Sara Sharonced19f22017-02-06 19:09:32 +02002485	 * command with an unknown station id, and for FW that doesn't support
2486 * station API since the cab queue is not included in the
2487 * tfd_queue_mask.
Sara Sharon26d6c162017-01-03 12:00:19 +02002488 */
Sara Sharon310181e2017-01-17 14:27:48 +02002489 if (iwl_mvm_has_new_tx_api(mvm)) {
2490 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2491 msta->sta_id,
Ilan Peer6508de02018-01-25 15:22:41 +02002492 0,
Sara Sharon310181e2017-01-17 14:27:48 +02002493 timeout);
Sara Sharone2af3fa2017-02-22 19:35:10 +02002494 mvmvif->cab_queue = queue;
Sara Sharonced19f22017-02-06 19:09:32 +02002495 } else if (!fw_has_api(&mvm->fw->ucode_capa,
Sara Sharonfc07bd82017-12-21 15:05:28 +02002496 IWL_UCODE_TLV_API_STA_TYPE))
Sara Sharon310181e2017-01-17 14:27:48 +02002497 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2498 &cfg, timeout);
Sara Sharon26d6c162017-01-03 12:00:19 +02002499
Avraham Stern337bfc92018-06-04 15:10:18 +03002500 if (mvmvif->ap_wep_key) {
2501 u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
2502
2503 if (key_offset == STA_KEY_IDX_INVALID)
2504 return -ENOSPC;
2505
2506 ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
2507 mvmvif->ap_wep_key, 1, 0, NULL, 0,
2508 key_offset, 0);
2509 if (ret)
2510 return ret;
2511 }
2512
Sara Sharon26d6c162017-01-03 12:00:19 +02002513 return 0;
2514}
2515
2516/*
 2517 * Send the FW a request to remove the station from its internal data
2518 * structures, and in addition remove it from the local data structure.
2519 */
2520int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2521{
2522 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2523 int ret;
2524
2525 lockdep_assert_held(&mvm->mutex);
2526
Sara Sharond49394a2017-03-05 13:01:08 +02002527 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2528
Sara Sharone2af3fa2017-02-22 19:35:10 +02002529 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
Ilan Peer6508de02018-01-25 15:22:41 +02002530 0, 0);
Sara Sharon26d6c162017-01-03 12:00:19 +02002531
2532 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2533 if (ret)
2534 IWL_WARN(mvm, "Failed sending remove station\n");
2535
2536 return ret;
2537}
2538
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002539#define IWL_MAX_RX_BA_SESSIONS 16
2540
Sara Sharonb915c102016-03-23 16:32:02 +02002541static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02002542{
Sara Sharonb915c102016-03-23 16:32:02 +02002543 struct iwl_mvm_delba_notif notif = {
2544 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2545 .metadata.sync = 1,
2546 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02002547 };
Sara Sharonb915c102016-03-23 16:32:02 +02002548 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
 2549}
Sara Sharon10b2b202016-03-20 16:23:41 +02002550
Sara Sharonb915c102016-03-23 16:32:02 +02002551static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2552 struct iwl_mvm_baid_data *data)
2553{
2554 int i;
2555
2556 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2557
2558 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2559 int j;
2560 struct iwl_mvm_reorder_buffer *reorder_buf =
2561 &data->reorder_buf[i];
Johannes Bergdfdddd92017-09-26 12:24:51 +02002562 struct iwl_mvm_reorder_buf_entry *entries =
2563 &data->entries[i * data->entries_per_queue];
Sara Sharonb915c102016-03-23 16:32:02 +02002564
Sara Sharon06904052016-02-28 20:28:17 +02002565 spin_lock_bh(&reorder_buf->lock);
2566 if (likely(!reorder_buf->num_stored)) {
2567 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02002568 continue;
Sara Sharon06904052016-02-28 20:28:17 +02002569 }
Sara Sharonb915c102016-03-23 16:32:02 +02002570
2571 /*
2572 * This shouldn't happen in regular DELBA since the internal
2573 * delBA notification should trigger a release of all frames in
2574 * the reorder buffer.
2575 */
2576 WARN_ON(1);
2577
2578 for (j = 0; j < reorder_buf->buf_size; j++)
Johannes Bergdfdddd92017-09-26 12:24:51 +02002579 __skb_queue_purge(&entries[j].e.frames);
Sara Sharon06904052016-02-28 20:28:17 +02002580 /*
 2581		 * Prevent timer re-arm. This prevents a very far-fetched case
2582 * where we timed out on the notification. There may be prior
2583 * RX frames pending in the RX queue before the notification
2584 * that might get processed between now and the actual deletion
2585 * and we would re-arm the timer although we are deleting the
2586 * reorder buffer.
2587 */
2588 reorder_buf->removed = true;
2589 spin_unlock_bh(&reorder_buf->lock);
2590 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02002591 }
2592}
2593
2594static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
Sara Sharonb915c102016-03-23 16:32:02 +02002595 struct iwl_mvm_baid_data *data,
Luca Coelho514c30692018-06-24 11:59:54 +03002596 u16 ssn, u16 buf_size)
Sara Sharonb915c102016-03-23 16:32:02 +02002597{
2598 int i;
2599
2600 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2601 struct iwl_mvm_reorder_buffer *reorder_buf =
2602 &data->reorder_buf[i];
Johannes Bergdfdddd92017-09-26 12:24:51 +02002603 struct iwl_mvm_reorder_buf_entry *entries =
2604 &data->entries[i * data->entries_per_queue];
Sara Sharonb915c102016-03-23 16:32:02 +02002605 int j;
2606
2607 reorder_buf->num_stored = 0;
2608 reorder_buf->head_sn = ssn;
2609 reorder_buf->buf_size = buf_size;
Sara Sharon06904052016-02-28 20:28:17 +02002610 /* rx reorder timer */
Kees Cook8cef5342017-10-24 02:29:37 -07002611 timer_setup(&reorder_buf->reorder_timer,
2612 iwl_mvm_reorder_timer_expired, 0);
Sara Sharon06904052016-02-28 20:28:17 +02002613 spin_lock_init(&reorder_buf->lock);
2614 reorder_buf->mvm = mvm;
Sara Sharonb915c102016-03-23 16:32:02 +02002615 reorder_buf->queue = i;
Sara Sharon5d43eab2017-02-02 12:51:39 +02002616 reorder_buf->valid = false;
Sara Sharonb915c102016-03-23 16:32:02 +02002617 for (j = 0; j < reorder_buf->buf_size; j++)
Johannes Bergdfdddd92017-09-26 12:24:51 +02002618 __skb_queue_head_init(&entries[j].e.frames);
Sara Sharonb915c102016-03-23 16:32:02 +02002619 }
Sara Sharon10b2b202016-03-20 16:23:41 +02002620}
2621
Johannes Berg8ca151b2013-01-24 14:25:36 +01002622int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Luca Coelho514c30692018-06-24 11:59:54 +03002623 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002624{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002625 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002626 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002627 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002628 int ret;
2629 u32 status;
2630
2631 lockdep_assert_held(&mvm->mutex);
2632
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002633 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2634 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2635 return -ENOSPC;
2636 }
2637
Sara Sharon10b2b202016-03-20 16:23:41 +02002638 if (iwl_mvm_has_new_rx_api(mvm) && start) {
Johannes Bergdfdddd92017-09-26 12:24:51 +02002639 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2640
2641 /* sparse doesn't like the __align() so don't check */
2642#ifndef __CHECKER__
2643 /*
2644 * The division below will be OK if either the cache line size
 2645		 * can be divided by the entry size (ALIGN will round up) or
 2646		 * if the entry size can be divided by the cache line size, in
2647 * which case the ALIGN() will do nothing.
2648 */
2649 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2650 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2651#endif
2652
2653 /*
2654 * Upward align the reorder buffer size to fill an entire cache
2655 * line for each queue, to avoid sharing cache lines between
2656 * different queues.
2657 */
2658 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
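		/*
		 * Worked example with illustrative sizes: 64-byte cache
		 * lines and a 32-byte entry pass the BUILD_BUG_ON above
		 * (64 % 32 == 0); buf_size = 6 then gives 192 bytes, which
		 * ALIGN() keeps at 192 as it is already a multiple of 64.
		 * A 48-byte entry would fail the build, since neither size
		 * divides the other.
		 */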
2659
Sara Sharon10b2b202016-03-20 16:23:41 +02002660 /*
2661 * Allocate here so if allocation fails we can bail out early
2662 * before starting the BA session in the firmware
2663 */
Sara Sharonb915c102016-03-23 16:32:02 +02002664 baid_data = kzalloc(sizeof(*baid_data) +
2665 mvm->trans->num_rx_queues *
Johannes Bergdfdddd92017-09-26 12:24:51 +02002666 reorder_buf_size,
Sara Sharonb915c102016-03-23 16:32:02 +02002667 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002668 if (!baid_data)
2669 return -ENOMEM;
Johannes Bergdfdddd92017-09-26 12:24:51 +02002670
2671 /*
2672 * This division is why we need the above BUILD_BUG_ON(),
2673 * if that doesn't hold then this will not be right.
2674 */
2675 baid_data->entries_per_queue =
2676 reorder_buf_size / sizeof(baid_data->entries[0]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002677 }
2678
Johannes Berg8ca151b2013-01-24 14:25:36 +01002679 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2680 cmd.sta_id = mvm_sta->sta_id;
2681 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002682 if (start) {
2683 cmd.add_immediate_ba_tid = (u8) tid;
2684 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Luca Coelho514c30692018-06-24 11:59:54 +03002685 cmd.rx_ba_window = cpu_to_le16(buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002686 } else {
2687 cmd.remove_immediate_ba_tid = (u8) tid;
2688 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002689 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2690 STA_MODIFY_REMOVE_BA_TID;
2691
2692 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002693 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2694 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002695 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002696 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002697 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002698
Sara Sharon837c4da2016-01-07 16:50:45 +02002699 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002700 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002701 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2702 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002703 break;
2704 case ADD_STA_IMMEDIATE_BA_FAILURE:
2705 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2706 ret = -ENOSPC;
2707 break;
2708 default:
2709 ret = -EIO;
2710 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2711 start ? "start" : "stopp", status);
2712 break;
2713 }
2714
Sara Sharon10b2b202016-03-20 16:23:41 +02002715 if (ret)
2716 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002717
Sara Sharon10b2b202016-03-20 16:23:41 +02002718 if (start) {
2719 u8 baid;
2720
2721 mvm->rx_ba_sessions++;
2722
2723 if (!iwl_mvm_has_new_rx_api(mvm))
2724 return 0;
2725
2726 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2727 ret = -EINVAL;
2728 goto out_free;
2729 }
2730 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2731 IWL_ADD_STA_BAID_SHIFT);
2732 baid_data->baid = baid;
2733 baid_data->timeout = timeout;
2734 baid_data->last_rx = jiffies;
Kees Cook8cef5342017-10-24 02:29:37 -07002735 baid_data->rcu_ptr = &mvm->baid_map[baid];
2736 timer_setup(&baid_data->session_timer,
2737 iwl_mvm_rx_agg_session_expired, 0);
Sara Sharon10b2b202016-03-20 16:23:41 +02002738 baid_data->mvm = mvm;
2739 baid_data->tid = tid;
2740 baid_data->sta_id = mvm_sta->sta_id;
2741
2742 mvm_sta->tid_to_baid[tid] = baid;
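		/*
		 * The timeout is in TUs; arm the timer at twice that
		 * interval so iwl_mvm_rx_agg_session_expired() can check
		 * last_rx and re-arm itself when traffic did arrive within
		 * the window, instead of tearing the session down after a
		 * single quiet interval.
		 */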
2743 if (timeout)
2744 mod_timer(&baid_data->session_timer,
2745 TU_TO_EXP_TIME(timeout * 2));
2746
Sara Sharon3f1c4c52017-10-02 12:07:59 +03002747 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002748 /*
2749 * protect the BA data with RCU to cover a case where our
2750	 * internal RX sync mechanism times out (not that it's
2751 * supposed to happen) and we will free the session data while
2752 * RX is being processed in parallel
2753 */
Sara Sharon35263a02016-06-21 12:12:10 +03002754 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2755 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002756 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2757 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002758 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002759 u8 baid = mvm_sta->tid_to_baid[tid];
2760
Sara Sharon60dec522016-06-21 14:14:08 +03002761 if (mvm->rx_ba_sessions > 0)
2762 /* check that restart flow didn't zero the counter */
2763 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002764 if (!iwl_mvm_has_new_rx_api(mvm))
2765 return 0;
2766
2767 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2768 return -EINVAL;
2769
2770 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2771 if (WARN_ON(!baid_data))
2772 return -EINVAL;
2773
2774 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002775 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002776 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002777 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2778 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002779 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002780 }
2781 return 0;
2782
2783out_free:
2784 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002785 return ret;
2786}
2787
Liad Kaufman9794c642015-08-19 17:34:28 +03002788int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2789 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002790{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002791 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002792 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002793 int ret;
2794 u32 status;
2795
2796 lockdep_assert_held(&mvm->mutex);
2797
2798 if (start) {
2799 mvm_sta->tfd_queue_msk |= BIT(queue);
2800 mvm_sta->tid_disable_agg &= ~BIT(tid);
2801 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002802 /* In DQA-mode the queue isn't removed on agg termination */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002803 mvm_sta->tid_disable_agg |= BIT(tid);
2804 }
2805
2806 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2807 cmd.sta_id = mvm_sta->sta_id;
2808 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002809 if (!iwl_mvm_has_new_tx_api(mvm))
2810 cmd.modify_mask = STA_MODIFY_QUEUES;
2811 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002812 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2813 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2814
2815 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002816 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2817 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002818 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002819 if (ret)
2820 return ret;
2821
Sara Sharon837c4da2016-01-07 16:50:45 +02002822 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002823 case ADD_STA_SUCCESS:
2824 break;
2825 default:
2826 ret = -EIO;
2827 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2828 start ? "start" : "stopp", status);
2829 break;
2830 }
2831
2832 return ret;
2833}
2834
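/*
 * Standard IEEE 802.11 UP-to-AC mapping: TIDs 1 and 2 map to background,
 * TIDs 0 and 3 to best effort, TIDs 4 and 5 to video and TIDs 6 and 7 to
 * voice.
 */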
Emmanuel Grumbachb797e3f2014-03-06 14:49:36 +02002835const u8 tid_to_mac80211_ac[] = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002836 IEEE80211_AC_BE,
2837 IEEE80211_AC_BK,
2838 IEEE80211_AC_BK,
2839 IEEE80211_AC_BE,
2840 IEEE80211_AC_VI,
2841 IEEE80211_AC_VI,
2842 IEEE80211_AC_VO,
2843 IEEE80211_AC_VO,
Liad Kaufman9794c642015-08-19 17:34:28 +03002844 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
Johannes Berg8ca151b2013-01-24 14:25:36 +01002845};
2846
Johannes Berg3e56ead2013-02-15 22:23:18 +01002847static const u8 tid_to_ucode_ac[] = {
2848 AC_BE,
2849 AC_BK,
2850 AC_BK,
2851 AC_BE,
2852 AC_VI,
2853 AC_VI,
2854 AC_VO,
2855 AC_VO,
2856};
2857
Johannes Berg8ca151b2013-01-24 14:25:36 +01002858int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2859 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2860{
Johannes Berg5b577a92013-11-14 18:20:04 +01002861 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002862 struct iwl_mvm_tid_data *tid_data;
Liad Kaufmandd321622017-04-05 16:25:11 +03002863 u16 normalized_ssn;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002864 int txq_id;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002865 int ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002866
2867 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2868 return -EINVAL;
2869
Naftali Goldsteinbd800e42017-08-28 11:51:05 +03002870 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2871 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2872 IWL_ERR(mvm,
2873 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
Johannes Berg8ca151b2013-01-24 14:25:36 +01002874 mvmsta->tid_data[tid].state);
2875 return -ENXIO;
2876 }
2877
2878 lockdep_assert_held(&mvm->mutex);
2879
Liad Kaufmanbd8f3fc2018-01-17 15:25:28 +02002880 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2881 iwl_mvm_has_new_tx_api(mvm)) {
2882 u8 ac = tid_to_mac80211_ac[tid];
2883
2884 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2885 if (ret)
2886 return ret;
2887 }
2888
Arik Nemtsovb2492502014-03-13 12:21:50 +02002889 spin_lock_bh(&mvmsta->lock);
2890
2891 /* possible race condition - we entered D0i3 while starting agg */
2892 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2893 spin_unlock_bh(&mvmsta->lock);
2894 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2895 return -EIO;
2896 }
2897
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002898 spin_lock(&mvm->queue_info_lock);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002899
Liad Kaufmancf961e12015-08-13 19:16:08 +03002900 /*
2901 * Note the possible cases:
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002902 * 1. An enabled TXQ - TXQ needs to become agg'ed
2903 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2904 * it as reserved
Liad Kaufmancf961e12015-08-13 19:16:08 +03002905 */
2906 txq_id = mvmsta->tid_data[tid].txq_id;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002907 if (txq_id == IWL_MVM_INVALID_QUEUE) {
Liad Kaufman9794c642015-08-19 17:34:28 +03002908 txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
Johannes Bergc8f54702017-06-19 23:50:31 +02002909 IWL_MVM_DQA_MIN_DATA_QUEUE,
2910 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufmancf961e12015-08-13 19:16:08 +03002911 if (txq_id < 0) {
2912 ret = txq_id;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002913 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2914 goto release_locks;
2915 }
2916
2917 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2918 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
Avraham Stern4a6d2e52018-03-05 11:26:53 +02002919 } else if (unlikely(mvm->queue_info[txq_id].status ==
2920 IWL_MVM_QUEUE_SHARED)) {
2921 ret = -ENXIO;
2922 IWL_DEBUG_TX_QUEUES(mvm,
2923 "Can't start tid %d agg on shared queue!\n",
2924 tid);
2925 goto release_locks;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002926 }
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002927
2928 spin_unlock(&mvm->queue_info_lock);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002929
Liad Kaufmancf961e12015-08-13 19:16:08 +03002930 IWL_DEBUG_TX_QUEUES(mvm,
2931 "AGG for tid %d will be on queue #%d\n",
2932 tid, txq_id);
2933
Johannes Berg8ca151b2013-01-24 14:25:36 +01002934 tid_data = &mvmsta->tid_data[tid];
Johannes Berg9a886582013-02-15 19:25:00 +01002935 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002936 tid_data->txq_id = txq_id;
2937 *ssn = tid_data->ssn;
2938
2939 IWL_DEBUG_TX_QUEUES(mvm,
2940 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2941 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2942 tid_data->next_reclaimed);
2943
Liad Kaufmandd321622017-04-05 16:25:11 +03002944 /*
Luca Coelho2f7a3862017-11-15 15:07:34 +02002945 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
Liad Kaufmandd321622017-04-05 16:25:11 +03002946 * to align the wrap around of ssn so we compare relevant values.
2947 */
2948 normalized_ssn = tid_data->ssn;
2949 if (mvm->trans->cfg->gen2)
2950 normalized_ssn &= 0xff;
2951
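	/*
	 * E.g. on gen2 a full ssn of 0x112 and a next_reclaimed of 0x12
	 * now compare equal, since both wrap at 8 bits.
	 */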
2952 if (normalized_ssn == tid_data->next_reclaimed) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002953 tid_data->state = IWL_AGG_STARTING;
2954 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2955 } else {
2956 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2957 }
2958
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002959 ret = 0;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002960 goto out;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002961
2962release_locks:
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002963 spin_unlock(&mvm->queue_info_lock);
2964out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01002965 spin_unlock_bh(&mvmsta->lock);
2966
Liad Kaufman4ecafae2015-07-14 13:36:18 +03002967 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002968}
2969
2970int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
Luca Coelho514c30692018-06-24 11:59:54 +03002971 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02002972 bool amsdu)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002973{
Johannes Berg5b577a92013-11-14 18:20:04 +01002974 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002975 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
Emmanuel Grumbach5d42e7b2015-03-19 20:04:51 +02002976 unsigned int wdg_timeout =
2977 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002978 int queue, ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03002979 bool alloc_queue = true;
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02002980 enum iwl_mvm_queue_status queue_status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002981 u16 ssn;
2982
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02002983 struct iwl_trans_txq_scd_cfg cfg = {
2984 .sta_id = mvmsta->sta_id,
2985 .tid = tid,
2986 .frame_limit = buf_size,
2987 .aggregate = true,
2988 };
2989
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02002990 /*
2991 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
2992 * manager, so this function should never be called in this case.
2993 */
Emmanuel Grumbach4243edb2017-12-13 11:38:48 +02002994 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02002995 return -EINVAL;
2996
Eyal Shapiraefed6642014-09-14 15:58:53 +03002997 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2998 != IWL_MAX_TID_COUNT);
2999
Johannes Berg8ca151b2013-01-24 14:25:36 +01003000 spin_lock_bh(&mvmsta->lock);
3001 ssn = tid_data->ssn;
3002 queue = tid_data->txq_id;
3003 tid_data->state = IWL_AGG_ON;
Eyal Shapiraefed6642014-09-14 15:58:53 +03003004 mvmsta->agg_tids |= BIT(tid);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003005 tid_data->ssn = 0xffff;
Emmanuel Grumbachbb81bb62015-10-26 16:00:29 +02003006 tid_data->amsdu_in_ampdu_allowed = amsdu;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003007 spin_unlock_bh(&mvmsta->lock);
3008
Sara Sharon34e10862017-02-23 13:15:07 +02003009 if (iwl_mvm_has_new_tx_api(mvm)) {
3010 /*
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003011 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3012 * would have failed, so if we are here there is no need to
3013 * allocate a queue.
3014 * However, if aggregation size is different than the default
3015 * size, the scheduler should be reconfigured.
3016 * We cannot do this with the new TX API, so return unsupported
3017		 * for now, until it is offloaded to firmware.
3018 * Note that if SCD default value changes - this condition
3019 * should be updated as well.
Sara Sharon34e10862017-02-23 13:15:07 +02003020 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003021 if (buf_size < IWL_FRAME_LIMIT)
Sara Sharon34e10862017-02-23 13:15:07 +02003022 return -ENOTSUPP;
3023
3024 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3025 if (ret)
3026 return -EIO;
3027 goto out;
3028 }
3029
Emmanuel Grumbacheea76c32016-02-21 16:29:17 +02003030 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
Johannes Berg8ca151b2013-01-24 14:25:36 +01003031
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02003032 spin_lock_bh(&mvm->queue_info_lock);
3033 queue_status = mvm->queue_info[queue].status;
3034 spin_unlock_bh(&mvm->queue_info_lock);
3035
Johannes Bergc8f54702017-06-19 23:50:31 +02003036 /* Maybe there is no need to even alloc a queue... */
3037	if (queue_status == IWL_MVM_QUEUE_READY)
3038 alloc_queue = false;
Liad Kaufmancf961e12015-08-13 19:16:08 +03003039
Johannes Bergc8f54702017-06-19 23:50:31 +02003040 /*
3041 * Only reconfig the SCD for the queue if the window size has
3042 * changed from current (become smaller)
3043 */
Sara Sharon0ec9257b2017-10-16 09:45:10 +03003044 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
Liad Kaufmancf961e12015-08-13 19:16:08 +03003045 /*
Johannes Bergc8f54702017-06-19 23:50:31 +02003046 * If reconfiguring an existing queue, it first must be
3047 * drained
Liad Kaufmancf961e12015-08-13 19:16:08 +03003048 */
Johannes Bergc8f54702017-06-19 23:50:31 +02003049 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3050 BIT(queue));
3051 if (ret) {
3052 IWL_ERR(mvm,
3053 "Error draining queue before reconfig\n");
3054 return ret;
3055 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03003056
Johannes Bergc8f54702017-06-19 23:50:31 +02003057 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3058 mvmsta->sta_id, tid,
3059 buf_size, ssn);
3060 if (ret) {
3061 IWL_ERR(mvm,
3062 "Error reconfiguring TXQ #%d\n", queue);
3063 return ret;
Liad Kaufmancf961e12015-08-13 19:16:08 +03003064 }
3065 }
3066
3067 if (alloc_queue)
3068 iwl_mvm_enable_txq(mvm, queue,
3069 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
3070 &cfg, wdg_timeout);
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03003071
Liad Kaufman9f9af3d2015-12-23 16:03:46 +02003072 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3073 if (queue_status != IWL_MVM_QUEUE_SHARED) {
3074 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3075 if (ret)
3076 return -EIO;
3077 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003078
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003079 /* No need to mark as reserved */
3080 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufmancf961e12015-08-13 19:16:08 +03003081 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003082 spin_unlock_bh(&mvm->queue_info_lock);
3083
Sara Sharon34e10862017-02-23 13:15:07 +02003084out:
Johannes Berg8ca151b2013-01-24 14:25:36 +01003085 /*
3086 * Even though in theory the peer could have different
3087 * aggregation reorder buffer sizes for different sessions,
3088 * our ucode doesn't allow for that and has a global limit
3089 * for each station. Therefore, use the minimum of all the
3090 * aggregation sessions and our default value.
3091 */
3092 mvmsta->max_agg_bufsize =
3093 min(mvmsta->max_agg_bufsize, buf_size);
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003094 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003095
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +03003096 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3097 sta->addr, tid);
3098
Gregory Greenmanecaf71d2017-11-01 07:16:29 +02003099 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003100}
3101
Sara Sharon34e10862017-02-23 13:15:07 +02003102static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3103 struct iwl_mvm_sta *mvmsta,
Avraham Stern4b387902018-03-07 10:41:18 +02003104 struct iwl_mvm_tid_data *tid_data)
Sara Sharon34e10862017-02-23 13:15:07 +02003105{
Avraham Stern4b387902018-03-07 10:41:18 +02003106 u16 txq_id = tid_data->txq_id;
3107
Sara Sharon34e10862017-02-23 13:15:07 +02003108 if (iwl_mvm_has_new_tx_api(mvm))
3109 return;
3110
3111 spin_lock_bh(&mvm->queue_info_lock);
3112 /*
3113	 * The TXQ is marked as reserved only if no traffic came through yet.
3114	 * This means no traffic has been sent on this TID (agg'd or not), so
3115	 * we no longer have use for the queue. It hasn't even been
3116	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
3117 * free.
3118 */
Avraham Stern4b387902018-03-07 10:41:18 +02003119 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
Sara Sharon34e10862017-02-23 13:15:07 +02003120 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
Avraham Stern4b387902018-03-07 10:41:18 +02003121 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3122 }
Sara Sharon34e10862017-02-23 13:15:07 +02003123
3124 spin_unlock_bh(&mvm->queue_info_lock);
3125}
3126
Johannes Berg8ca151b2013-01-24 14:25:36 +01003127int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3128 struct ieee80211_sta *sta, u16 tid)
3129{
Johannes Berg5b577a92013-11-14 18:20:04 +01003130 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003131 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3132 u16 txq_id;
3133 int err;
3134
Emmanuel Grumbachf9aa8dd2013-03-04 09:11:08 +02003135 /*
3136 * If mac80211 is cleaning its state, then say that we finished since
3137 * our state has been cleared anyway.
3138 */
3139 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3140 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3141 return 0;
3142 }
3143
Johannes Berg8ca151b2013-01-24 14:25:36 +01003144 spin_lock_bh(&mvmsta->lock);
3145
3146 txq_id = tid_data->txq_id;
3147
3148 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3149 mvmsta->sta_id, tid, txq_id, tid_data->state);
3150
Eyal Shapiraefed6642014-09-14 15:58:53 +03003151 mvmsta->agg_tids &= ~BIT(tid);
3152
Avraham Stern4b387902018-03-07 10:41:18 +02003153 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003154
Johannes Berg8ca151b2013-01-24 14:25:36 +01003155 switch (tid_data->state) {
3156 case IWL_AGG_ON:
Johannes Berg9a886582013-02-15 19:25:00 +01003157 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003158
3159 IWL_DEBUG_TX_QUEUES(mvm,
3160 "ssn = %d, next_recl = %d\n",
3161 tid_data->ssn, tid_data->next_reclaimed);
3162
Johannes Berg8ca151b2013-01-24 14:25:36 +01003163 tid_data->ssn = 0xffff;
Johannes Bergf7f89e72014-08-05 15:24:44 +02003164 tid_data->state = IWL_AGG_OFF;
Johannes Bergf7f89e72014-08-05 15:24:44 +02003165 spin_unlock_bh(&mvmsta->lock);
3166
3167 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3168
3169 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
Johannes Bergf7f89e72014-08-05 15:24:44 +02003170 return 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003171 case IWL_AGG_STARTING:
3172 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3173 /*
3174 * The agg session has been stopped before it was set up. This
3175 * can happen when the AddBA timer times out for example.
3176 */
3177
3178 /* No barriers since we are under mutex */
3179 lockdep_assert_held(&mvm->mutex);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003180
3181 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3182 tid_data->state = IWL_AGG_OFF;
3183 err = 0;
3184 break;
3185 default:
3186 IWL_ERR(mvm,
3187 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3188 mvmsta->sta_id, tid, tid_data->state);
3189 IWL_ERR(mvm,
3190 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3191 err = -EINVAL;
3192 }
3193
3194 spin_unlock_bh(&mvmsta->lock);
3195
3196 return err;
3197}
3198
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003199int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3200 struct ieee80211_sta *sta, u16 tid)
3201{
Johannes Berg5b577a92013-11-14 18:20:04 +01003202 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003203 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3204 u16 txq_id;
Johannes Bergb6658ff2013-07-24 13:55:51 +02003205 enum iwl_mvm_agg_state old_state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003206
3207 /*
3208 * First set the agg state to OFF to avoid calling
3209 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3210 */
3211 spin_lock_bh(&mvmsta->lock);
3212 txq_id = tid_data->txq_id;
3213 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3214 mvmsta->sta_id, tid, txq_id, tid_data->state);
Johannes Bergb6658ff2013-07-24 13:55:51 +02003215 old_state = tid_data->state;
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003216 tid_data->state = IWL_AGG_OFF;
Eyal Shapiraefed6642014-09-14 15:58:53 +03003217 mvmsta->agg_tids &= ~BIT(tid);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003218 spin_unlock_bh(&mvmsta->lock);
3219
Avraham Stern4b387902018-03-07 10:41:18 +02003220 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
Liad Kaufman4ecafae2015-07-14 13:36:18 +03003221
Johannes Bergb6658ff2013-07-24 13:55:51 +02003222 if (old_state >= IWL_AGG_ON) {
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02003223 iwl_mvm_drain_sta(mvm, mvmsta, true);
Sara Sharond6d517b2017-03-06 10:16:11 +02003224
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003225 if (iwl_mvm_has_new_tx_api(mvm)) {
3226 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3227 BIT(tid), 0))
3228 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02003229 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003230 } else {
3231 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3232 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
Sara Sharond6d517b2017-03-06 10:16:11 +02003233 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
Mordechai Goodsteind167e812017-05-10 16:42:53 +03003234 }
Sara Sharond6d517b2017-03-06 10:16:11 +02003235
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02003236 iwl_mvm_drain_sta(mvm, mvmsta, false);
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003237
Johannes Bergf7f89e72014-08-05 15:24:44 +02003238 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
Johannes Bergb6658ff2013-07-24 13:55:51 +02003239 }
3240
Emmanuel Grumbache3d9e7c2013-02-19 16:13:53 +02003241 return 0;
3242}
3243
Johannes Berg8ca151b2013-01-24 14:25:36 +01003244static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3245{
Johannes Berg2dc2a152015-06-16 17:09:18 +02003246 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003247
3248 lockdep_assert_held(&mvm->mutex);
3249
Johannes Berg2dc2a152015-06-16 17:09:18 +02003250 /* Pick the unused key offset with the highest 'deleted'
3251 * counter. Every time a key is deleted, all the counters
3252 * are incremented and the one that was just deleted is
3253 * reset to zero. Thus, the highest counter is the one
3254 * that was deleted longest ago. Pick that one.
3255 */
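	/*
	 * E.g. if offsets 1 and 3 are unused with fw_key_deleted counters
	 * of 2 and 5 respectively, offset 3 is picked - it has been free
	 * the longest.
	 */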
3256 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3257 if (test_bit(i, mvm->fw_key_table))
3258 continue;
3259 if (mvm->fw_key_deleted[i] > max) {
3260 max = mvm->fw_key_deleted[i];
3261 max_offs = i;
3262 }
3263 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003264
Johannes Berg2dc2a152015-06-16 17:09:18 +02003265 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003266 return STA_KEY_IDX_INVALID;
3267
Johannes Berg2dc2a152015-06-16 17:09:18 +02003268 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003269}
3270
Johannes Berg5f7a1842015-12-11 09:36:10 +01003271static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3272 struct ieee80211_vif *vif,
3273 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003274{
Johannes Berg5b530e92014-12-23 16:00:17 +01003275 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003276
Johannes Berg5f7a1842015-12-11 09:36:10 +01003277 if (sta)
3278 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003279
3280 /*
3281 * The device expects GTKs for station interfaces to be
3282 * installed as GTKs for the AP station. If we have no
3283 * station ID, then use AP's station ID.
3284 */
3285 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003286 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Avri Altman9513c5e2015-10-19 16:29:11 +02003287 u8 sta_id = mvmvif->ap_sta_id;
3288
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003289 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3290 lockdep_is_held(&mvm->mutex));
3291
Avri Altman9513c5e2015-10-19 16:29:11 +02003292 /*
3293 * It is possible that the 'sta' parameter is NULL,
3294 * for example when a GTK is removed - the sta_id will then
3295 * be the AP ID, and no station was passed by mac80211.
3296 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03003297 if (IS_ERR_OR_NULL(sta))
3298 return NULL;
3299
3300 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02003301 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003302
Johannes Berg5f7a1842015-12-11 09:36:10 +01003303 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003304}
3305
3306static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
David Spinadel85aeb582017-03-30 19:43:53 +03003307 u32 sta_id,
Sara Sharon45c458b2016-11-09 15:43:26 +02003308 struct ieee80211_key_conf *key, bool mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003309 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003310 u8 key_offset, bool mfp)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003311{
Sara Sharon45c458b2016-11-09 15:43:26 +02003312 union {
3313 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3314 struct iwl_mvm_add_sta_key_cmd cmd;
3315 } u = {};
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003316 __le16 key_flags;
Johannes Berg79920742014-11-03 15:43:04 +01003317 int ret;
3318 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003319 u16 keyidx;
Sara Sharon45c458b2016-11-09 15:43:26 +02003320 u64 pn = 0;
3321 int i, size;
3322 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3323 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003324
David Spinadel85aeb582017-03-30 19:43:53 +03003325 if (sta_id == IWL_MVM_INVALID_STA)
3326 return -EINVAL;
3327
Sara Sharon45c458b2016-11-09 15:43:26 +02003328 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
Johannes Berg8ca151b2013-01-24 14:25:36 +01003329 STA_KEY_FLG_KEYID_MSK;
3330 key_flags = cpu_to_le16(keyidx);
3331 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3332
Sara Sharon45c458b2016-11-09 15:43:26 +02003333 switch (key->cipher) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003334 case WLAN_CIPHER_SUITE_TKIP:
3335 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003336 if (new_api) {
3337 memcpy((void *)&u.cmd.tx_mic_key,
3338 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3339 IWL_MIC_KEY_SIZE);
3340
3341 memcpy((void *)&u.cmd.rx_mic_key,
3342 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3343 IWL_MIC_KEY_SIZE);
3344 pn = atomic64_read(&key->tx_pn);
3345
3346 } else {
3347 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3348 for (i = 0; i < 5; i++)
3349 u.cmd_v1.tkip_rx_ttak[i] =
3350 cpu_to_le16(tkip_p1k[i]);
3351 }
3352 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003353 break;
3354 case WLAN_CIPHER_SUITE_CCMP:
3355 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
Sara Sharon45c458b2016-11-09 15:43:26 +02003356 memcpy(u.cmd.common.key, key->key, key->keylen);
3357 if (new_api)
3358 pn = atomic64_read(&key->tx_pn);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003359 break;
Johannes Bergba3943b2014-11-12 23:54:48 +01003360 case WLAN_CIPHER_SUITE_WEP104:
3361 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
John W. Linvilleaa0cb082015-01-12 16:18:11 -05003362 /* fall through */
Johannes Bergba3943b2014-11-12 23:54:48 +01003363 case WLAN_CIPHER_SUITE_WEP40:
3364 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003365 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
Johannes Bergba3943b2014-11-12 23:54:48 +01003366 break;
Ayala Beker2a53d162016-04-07 16:21:57 +03003367 case WLAN_CIPHER_SUITE_GCMP_256:
3368 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3369 /* fall through */
3370 case WLAN_CIPHER_SUITE_GCMP:
3371 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
Sara Sharon45c458b2016-11-09 15:43:26 +02003372 memcpy(u.cmd.common.key, key->key, key->keylen);
3373 if (new_api)
3374 pn = atomic64_read(&key->tx_pn);
Ayala Beker2a53d162016-04-07 16:21:57 +03003375 break;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003376 default:
Max Stepanove36e5432013-08-27 19:56:13 +03003377 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
Sara Sharon45c458b2016-11-09 15:43:26 +02003378 memcpy(u.cmd.common.key, key->key, key->keylen);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003379 }
3380
Johannes Bergba3943b2014-11-12 23:54:48 +01003381 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003382 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003383 if (mfp)
3384 key_flags |= cpu_to_le16(STA_KEY_MFP);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003385
Sara Sharon45c458b2016-11-09 15:43:26 +02003386 u.cmd.common.key_offset = key_offset;
3387 u.cmd.common.key_flags = key_flags;
David Spinadel85aeb582017-03-30 19:43:53 +03003388 u.cmd.common.sta_id = sta_id;
Sara Sharon45c458b2016-11-09 15:43:26 +02003389
3390 if (new_api) {
3391 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3392 size = sizeof(u.cmd);
3393 } else {
3394 size = sizeof(u.cmd_v1);
3395 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003396
3397 status = ADD_STA_SUCCESS;
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003398 if (cmd_flags & CMD_ASYNC)
Sara Sharon45c458b2016-11-09 15:43:26 +02003399 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3400 &u.cmd);
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003401 else
Sara Sharon45c458b2016-11-09 15:43:26 +02003402 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3403 &u.cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003404
3405 switch (status) {
3406 case ADD_STA_SUCCESS:
3407 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3408 break;
3409 default:
3410 ret = -EIO;
3411 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3412 break;
3413 }
3414
3415 return ret;
3416}
3417
3418static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3419 struct ieee80211_key_conf *keyconf,
3420 u8 sta_id, bool remove_key)
3421{
3422 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3423
3424 /* verify the key details match the required command's expectations */
Ayala Beker8e160ab2016-04-11 11:37:38 +03003425 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3426 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3427 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3428 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3429 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3430 return -EINVAL;
3431
3432 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3433 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
Johannes Berg8ca151b2013-01-24 14:25:36 +01003434 return -EINVAL;
3435
3436 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3437 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3438
3439 if (remove_key) {
3440 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3441 } else {
3442 struct ieee80211_key_seq seq;
3443 const u8 *pn;
3444
Ayala Bekeraa950522016-06-01 00:28:09 +03003445 switch (keyconf->cipher) {
3446 case WLAN_CIPHER_SUITE_AES_CMAC:
3447 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3448 break;
Ayala Beker8e160ab2016-04-11 11:37:38 +03003449 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3450 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3451 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3452 break;
Ayala Bekeraa950522016-06-01 00:28:09 +03003453 default:
3454 return -EINVAL;
3455 }
3456
Ayala Beker8e160ab2016-04-11 11:37:38 +03003457 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3458 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3459 igtk_cmd.ctrl_flags |=
3460 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003461 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
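		/*
		 * The 6-byte BIP packet number is kept MSB-first (pn[0] is
		 * the most significant byte); repack it into the
		 * little-endian 48-bit counter the firmware expects.
		 */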
3462 pn = seq.aes_cmac.pn;
3463 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3464 ((u64) pn[4] << 8) |
3465 ((u64) pn[3] << 16) |
3466 ((u64) pn[2] << 24) |
3467 ((u64) pn[1] << 32) |
3468 ((u64) pn[0] << 40));
3469 }
3470
3471 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3472 remove_key ? "removing" : "installing",
3473 igtk_cmd.sta_id);
3474
Ayala Beker8e160ab2016-04-11 11:37:38 +03003475 if (!iwl_mvm_has_new_rx_api(mvm)) {
3476 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3477 .ctrl_flags = igtk_cmd.ctrl_flags,
3478 .key_id = igtk_cmd.key_id,
3479 .sta_id = igtk_cmd.sta_id,
3480 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3481 };
3482
3483 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3484 ARRAY_SIZE(igtk_cmd_v1.igtk));
3485 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3486 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3487 }
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03003488 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003489 sizeof(igtk_cmd), &igtk_cmd);
3490}
3491
3492
3493static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3494 struct ieee80211_vif *vif,
3495 struct ieee80211_sta *sta)
3496{
Johannes Berg5b530e92014-12-23 16:00:17 +01003497 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003498
3499 if (sta)
3500 return sta->addr;
3501
3502 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003503 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003504 u8 sta_id = mvmvif->ap_sta_id;
3505 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3506 lockdep_is_held(&mvm->mutex));
3507 return sta->addr;
3508 }
3509
3511 return NULL;
3512}
3513
Johannes Berg2f6319d2014-11-12 23:39:56 +01003514static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3515 struct ieee80211_vif *vif,
3516 struct ieee80211_sta *sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01003517 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003518 u8 key_offset,
Johannes Bergba3943b2014-11-12 23:54:48 +01003519 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003520{
Johannes Berg8ca151b2013-01-24 14:25:36 +01003521 int ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003522 const u8 *addr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003523 struct ieee80211_key_seq seq;
3524 u16 p1k[5];
David Spinadel85aeb582017-03-30 19:43:53 +03003525 u32 sta_id;
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003526 bool mfp = false;
David Spinadel85aeb582017-03-30 19:43:53 +03003527
3528 if (sta) {
3529 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3530
3531 sta_id = mvm_sta->sta_id;
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003532 mfp = sta->mfp;
David Spinadel85aeb582017-03-30 19:43:53 +03003533 } else if (vif->type == NL80211_IFTYPE_AP &&
3534 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3535 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3536
3537 sta_id = mvmvif->mcast_sta.sta_id;
3538 } else {
3539 IWL_ERR(mvm, "Failed to find station id\n");
3540 return -EINVAL;
3541 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01003542
Johannes Berg8ca151b2013-01-24 14:25:36 +01003543 switch (keyconf->cipher) {
3544 case WLAN_CIPHER_SUITE_TKIP:
3545 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3546 /* get phase 1 key from mac80211 */
3547 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3548 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
David Spinadel85aeb582017-03-30 19:43:53 +03003549 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003550 seq.tkip.iv32, p1k, 0, key_offset,
3551 mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003552 break;
3553 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergba3943b2014-11-12 23:54:48 +01003554 case WLAN_CIPHER_SUITE_WEP40:
3555 case WLAN_CIPHER_SUITE_WEP104:
Ayala Beker2a53d162016-04-07 16:21:57 +03003556 case WLAN_CIPHER_SUITE_GCMP:
3557 case WLAN_CIPHER_SUITE_GCMP_256:
David Spinadel85aeb582017-03-30 19:43:53 +03003558 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003559 0, NULL, 0, key_offset, mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003560 break;
3561 default:
David Spinadel85aeb582017-03-30 19:43:53 +03003562 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003563 0, NULL, 0, key_offset, mfp);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003564 }
3565
Johannes Berg8ca151b2013-01-24 14:25:36 +01003566 return ret;
3567}
3568
Johannes Berg2f6319d2014-11-12 23:39:56 +01003569static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
Johannes Bergba3943b2014-11-12 23:54:48 +01003570 struct ieee80211_key_conf *keyconf,
3571 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003572{
Sara Sharon45c458b2016-11-09 15:43:26 +02003573 union {
3574 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3575 struct iwl_mvm_add_sta_key_cmd cmd;
3576 } u = {};
3577 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3578 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003579 __le16 key_flags;
Sara Sharon45c458b2016-11-09 15:43:26 +02003580 int ret, size;
Johannes Berg79920742014-11-03 15:43:04 +01003581 u32 status;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003582
Sara Sharone4f13ad2018-01-15 13:50:59 +02003583 /* This is a valid situation for GTK removal */
David Spinadel85aeb582017-03-30 19:43:53 +03003584 if (sta_id == IWL_MVM_INVALID_STA)
Sara Sharone4f13ad2018-01-15 13:50:59 +02003585 return 0;
David Spinadel85aeb582017-03-30 19:43:53 +03003586
Emmanuel Grumbach8115efb2013-02-05 10:08:35 +02003587 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3588 STA_KEY_FLG_KEYID_MSK);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003589 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3590 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3591
Johannes Bergba3943b2014-11-12 23:54:48 +01003592 if (mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003593 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3594
Sara Sharon45c458b2016-11-09 15:43:26 +02003595 /*
3596 * The fields assigned here are in the same location at the start
3597 * of the command, so we can do this union trick.
3598 */
3599 u.cmd.common.key_flags = key_flags;
3600 u.cmd.common.key_offset = keyconf->hw_key_idx;
3601 u.cmd.common.sta_id = sta_id;
3602
3603 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003604
Johannes Berg8ca151b2013-01-24 14:25:36 +01003605 status = ADD_STA_SUCCESS;
Sara Sharon45c458b2016-11-09 15:43:26 +02003606 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3607 &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003608
3609 switch (status) {
3610 case ADD_STA_SUCCESS:
3611 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3612 break;
3613 default:
3614 ret = -EIO;
3615 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3616 break;
3617 }
3618
3619 return ret;
3620}
3621
Johannes Berg2f6319d2014-11-12 23:39:56 +01003622int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3623 struct ieee80211_vif *vif,
3624 struct ieee80211_sta *sta,
3625 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003626 u8 key_offset)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003627{
Johannes Bergba3943b2014-11-12 23:54:48 +01003628 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003629 struct iwl_mvm_sta *mvm_sta;
David Spinadel85aeb582017-03-30 19:43:53 +03003630 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003631 int ret;
Matti Gottlieb11828db2015-06-01 15:15:11 +03003632 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
Johannes Berg2f6319d2014-11-12 23:39:56 +01003633
3634 lockdep_assert_held(&mvm->mutex);
3635
David Spinadel85aeb582017-03-30 19:43:53 +03003636 if (vif->type != NL80211_IFTYPE_AP ||
3637 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3638 /* Get the station id from the mvm local station table */
3639 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3640 if (!mvm_sta) {
3641 IWL_ERR(mvm, "Failed to find station\n");
Johannes Berg2f6319d2014-11-12 23:39:56 +01003642 return -EINVAL;
3643 }
David Spinadel85aeb582017-03-30 19:43:53 +03003644 sta_id = mvm_sta->sta_id;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003645
David Spinadel85aeb582017-03-30 19:43:53 +03003646 /*
3647 * It is possible that the 'sta' parameter is NULL, and thus
Beni Leve829b172018-02-20 13:41:54 +02003648 * there is a need to retrieve the sta from the local station
David Spinadel85aeb582017-03-30 19:43:53 +03003649 * table.
3650 */
3651 if (!sta) {
3652 sta = rcu_dereference_protected(
3653 mvm->fw_id_to_mac_id[sta_id],
3654 lockdep_is_held(&mvm->mutex));
3655 if (IS_ERR_OR_NULL(sta)) {
3656 IWL_ERR(mvm, "Invalid station id\n");
3657 return -EINVAL;
3658 }
3659 }
3660
3661 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3662 return -EINVAL;
Beni Leve829b172018-02-20 13:41:54 +02003663 } else {
3664 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3665
3666 sta_id = mvmvif->mcast_sta.sta_id;
3667 }
3668
3669 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3670 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3671 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3672 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3673 goto end;
David Spinadel85aeb582017-03-30 19:43:53 +03003674 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003675
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003676 /* If the key_offset is not pre-assigned, we need to find a
3677 * new offset to use. In normal cases, the offset is not
3678 * pre-assigned, but during HW_RESTART we want to reuse the
3679 * same indices, so we pass them when this function is called.
3680 *
3681 * In D3 entry, we need to hardcoded the indices (because the
3682 * firmware hardcodes the PTK offset to 0). In this case, we
3683 * need to make sure we don't overwrite the hw_key_idx in the
3684 * keyconf structure, because otherwise we cannot configure
3685 * the original ones back when resuming.
3686 */
3687 if (key_offset == STA_KEY_IDX_INVALID) {
3688 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3689 if (key_offset == STA_KEY_IDX_INVALID)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003690 return -ENOSPC;
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003691 keyconf->hw_key_idx = key_offset;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003692 }
3693
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003694 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003695 if (ret)
Johannes Bergba3943b2014-11-12 23:54:48 +01003696 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003697
3698 /*
3699 * For WEP, the same key is used for multicast and unicast. Upload it
3700 * again, using the same key offset, and now pointing the other one
3701 * to the same key slot (offset).
3702 * If this fails, remove the original as well.
3703 */
David Spinadel85aeb582017-03-30 19:43:53 +03003704 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3705 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3706 sta) {
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003707 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3708 key_offset, !mcast);
Johannes Bergba3943b2014-11-12 23:54:48 +01003709 if (ret) {
Johannes Bergba3943b2014-11-12 23:54:48 +01003710 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003711 goto end;
Johannes Bergba3943b2014-11-12 23:54:48 +01003712 }
3713 }
Johannes Berg2f6319d2014-11-12 23:39:56 +01003714
Luca Coelho9c3deeb2015-11-11 01:06:17 +02003715 __set_bit(key_offset, mvm->fw_key_table);
3716
Johannes Berg2f6319d2014-11-12 23:39:56 +01003717end:
3718 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3719 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
Matti Gottlieb11828db2015-06-01 15:15:11 +03003720 sta ? sta->addr : zero_addr, ret);
Johannes Berg2f6319d2014-11-12 23:39:56 +01003721 return ret;
3722}
3723
3724int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3725 struct ieee80211_vif *vif,
3726 struct ieee80211_sta *sta,
3727 struct ieee80211_key_conf *keyconf)
3728{
Johannes Bergba3943b2014-11-12 23:54:48 +01003729 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003730 struct iwl_mvm_sta *mvm_sta;
Sara Sharon0ae98812017-01-04 14:53:58 +02003731 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2dc2a152015-06-16 17:09:18 +02003732 int ret, i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003733
3734 lockdep_assert_held(&mvm->mutex);
3735
Johannes Berg5f7a1842015-12-11 09:36:10 +01003736 /* Get the station from the mvm local station table */
3737 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
Luca Coelho71793b7d2017-03-30 12:04:47 +03003738 if (mvm_sta)
3739 sta_id = mvm_sta->sta_id;
David Spinadel85aeb582017-03-30 19:43:53 +03003740 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3741 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3742
3744 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3745 keyconf->keyidx, sta_id);
3746
Luca Coelho71793b7d2017-03-30 12:04:47 +03003747 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3748 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3749 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
Johannes Berg2f6319d2014-11-12 23:39:56 +01003750 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3751
3752 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3753 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3754 keyconf->hw_key_idx);
3755 return -ENOENT;
3756 }
3757
Johannes Berg2dc2a152015-06-16 17:09:18 +02003758 /* track which key was deleted last */
3759 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3760 if (mvm->fw_key_deleted[i] < U8_MAX)
3761 mvm->fw_key_deleted[i]++;
3762 }
3763 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3764
David Spinadel85aeb582017-03-30 19:43:53 +03003765 if (sta && !mvm_sta) {
Johannes Berg2f6319d2014-11-12 23:39:56 +01003766 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3767 return 0;
3768 }
3769
Johannes Bergba3943b2014-11-12 23:54:48 +01003770 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3771 if (ret)
3772 return ret;
3773
3774 /* delete WEP key twice to get rid of (now useless) offset */
3775 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3776 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3777 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3778
3779 return ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003780}
3781
Johannes Berg8ca151b2013-01-24 14:25:36 +01003782void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3783 struct ieee80211_vif *vif,
3784 struct ieee80211_key_conf *keyconf,
3785 struct ieee80211_sta *sta, u32 iv32,
3786 u16 *phase1key)
3787{
Beni Levc3eb5362013-02-06 17:22:18 +02003788 struct iwl_mvm_sta *mvm_sta;
Johannes Bergba3943b2014-11-12 23:54:48 +01003789 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003790 bool mfp = sta ? sta->mfp : false;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003791
Beni Levc3eb5362013-02-06 17:22:18 +02003792 rcu_read_lock();
3793
Johannes Berg5f7a1842015-12-11 09:36:10 +01003794 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3795 if (WARN_ON_ONCE(!mvm_sta))
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003796 goto unlock;
David Spinadel85aeb582017-03-30 19:43:53 +03003797 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
Emmanuel Grumbach48831452018-01-29 10:00:05 +02003798 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3799 mfp);
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003800
3801 unlock:
Beni Levc3eb5362013-02-06 17:22:18 +02003802 rcu_read_unlock();
Johannes Berg8ca151b2013-01-24 14:25:36 +01003803}
3804
Johannes Berg9cc40712013-02-15 22:47:48 +01003805void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3806 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003807{
Johannes Berg5b577a92013-11-14 18:20:04 +01003808 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003809 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003810 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003811 .sta_id = mvmsta->sta_id,
Emmanuel Grumbach5af01772013-06-09 12:59:24 +03003812 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
Johannes Berg9cc40712013-02-15 22:47:48 +01003813 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003814 };
3815 int ret;
3816
Sara Sharon854c5702016-01-26 13:17:47 +02003817 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3818 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003819 if (ret)
3820 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3821}
3822
Johannes Berg9cc40712013-02-15 22:47:48 +01003823void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3824 struct ieee80211_sta *sta,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003825 enum ieee80211_frame_release_type reason,
Johannes Berg3e56ead2013-02-15 22:23:18 +01003826 u16 cnt, u16 tids, bool more_data,
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003827 bool single_sta_queue)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003828{
Johannes Berg5b577a92013-11-14 18:20:04 +01003829 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003830 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003831 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003832 .sta_id = mvmsta->sta_id,
Johannes Berg8ca151b2013-01-24 14:25:36 +01003833 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3834 .sleep_tx_count = cpu_to_le16(cnt),
Johannes Berg9cc40712013-02-15 22:47:48 +01003835 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003836 };
Johannes Berg3e56ead2013-02-15 22:23:18 +01003837 int tid, ret;
3838 unsigned long _tids = tids;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003839
Johannes Berg3e56ead2013-02-15 22:23:18 +01003840 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3841 * Note that this field is reserved and unused by firmware not
3842 * supporting GO uAPSD, so it's safe to always do this.
3843 */
3844 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3845 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3846
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003847 /* If we're releasing frames from aggregation or dqa queues then check
3848 * if all the queues that we're releasing frames from, combined, have:
Johannes Berg3e56ead2013-02-15 22:23:18 +01003849 * - more frames than the service period, in which case more_data
3850 * needs to be set
3851 * - fewer than 'cnt' frames, in which case we need to adjust the
3852 * firmware command (but do that unconditionally)
3853 */
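	/*
	 * E.g. with cnt == 4 but only 3 frames queued in total,
	 * sleep_tx_count ends up 3 and more_data stays false; with 10
	 * frames queued, sleep_tx_count is 4 and more_data is set.
	 */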
Sara Sharon9a3fcf92017-03-14 09:50:35 +02003854 if (single_sta_queue) {
Johannes Berg3e56ead2013-02-15 22:23:18 +01003855 int remaining = cnt;
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003856 int sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003857
3858 spin_lock_bh(&mvmsta->lock);
3859 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3860 struct iwl_mvm_tid_data *tid_data;
3861 u16 n_queued;
3862
3863 tid_data = &mvmsta->tid_data[tid];
Johannes Berg3e56ead2013-02-15 22:23:18 +01003864
Liad Kaufmandd321622017-04-05 16:25:11 +03003865 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003866 if (n_queued > remaining) {
3867 more_data = true;
3868 remaining = 0;
3869 break;
3870 }
3871 remaining -= n_queued;
3872 }
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003873 sleep_tx_count = cnt - remaining;
3874 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3875 mvmsta->sleep_tx_count = sleep_tx_count;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003876 spin_unlock_bh(&mvmsta->lock);
3877
Emmanuel Grumbach36be0eb2015-11-05 10:32:31 +02003878 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
Johannes Berg3e56ead2013-02-15 22:23:18 +01003879 if (WARN_ON(cnt - remaining == 0)) {
3880 ieee80211_sta_eosp(sta);
3881 return;
3882 }
3883 }
3884
3885 /* Note: this is ignored by firmware not supporting GO uAPSD */
3886 if (more_data)
Sara Sharonced19f22017-02-06 19:09:32 +02003887 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003888
3889 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3890 mvmsta->next_status_eosp = true;
Sara Sharonced19f22017-02-06 19:09:32 +02003891 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003892 } else {
Sara Sharonced19f22017-02-06 19:09:32 +02003893 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003894 }
3895
Emmanuel Grumbach156f92f2015-11-24 14:55:18 +02003896 /* block the Tx queues until the FW updated the sleep Tx count */
3897 iwl_trans_block_txq_ptrs(mvm->trans, true);
3898
3899 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3900 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
Sara Sharon854c5702016-01-26 13:17:47 +02003901 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003902 if (ret)
3903 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3904}
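
/* iwl_mvm_rx_eosp_notif - handle an EOSP notification from the firmware:
 * look up the station by its firmware index and tell mac80211 that the
 * service period has ended.
 */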
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
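
/* iwl_mvm_sta_modify_disable_tx - set or clear STA_FLG_DISABLE_TX for a
 * single station with an asynchronous ADD_STA (modify) command, making
 * the firmware stop or resume transmitting to it.
 */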
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
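
/* iwl_mvm_sta_modify_disable_tx_ap - AP-side TX disable/enable for a
 * station: update the driver state under the station lock, tell mac80211
 * to start/stop buffering frames for it, and mirror the change to the
 * firmware.
 */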
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
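
/* Same as iwl_mvm_sta_modify_disable_tx(), but for an internal station
 * (e.g. the broadcast or multicast station), which uses the MAC context
 * id/color rather than a per-station one.
 */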
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
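
/* iwl_mvm_modify_all_sta_disable_tx - block or unblock TX for every
 * station that belongs to the given vif; on firmware advertising
 * IWL_UCODE_TLV_API_STA_TYPE this also covers the internal multicast
 * station, and the broadcast station on unblock.
 */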
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* The multicast station needs to be blocked/unblocked as well */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (the FW blocks it for
	 * immediate quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}
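
/* iwl_mvm_csa_client_absent - as the name suggests, called around a
 * channel switch to disable TX towards the AP station so the client
 * stays quiet until the switch completes.
 */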
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}
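
/* iwl_mvm_tid_queued - return the number of frames still pending on a
 * TID, i.e. the SN distance from the next frame to be reclaimed to the
 * next sequence number that will be used.
 */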
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so
	 * truncate the SN as well, to align the wraparound of the two
	 * values before comparing them.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}