blob: e502a6e6bf9025f104afeaac5ec7c1f4a66271f0 [file] [log] [blame]
Johannes Berg8ca151b2013-01-24 14:25:36 +01001/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +03008 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
Sara Sharon26d6c162017-01-03 12:00:19 +020010 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010011 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
Emmanuel Grumbach410dc5a2013-02-18 09:22:28 +020027 * in the file called COPYING.
Johannes Berg8ca151b2013-01-24 14:25:36 +010028 *
29 * Contact Information:
Emmanuel Grumbachcb2f8272015-11-17 15:39:56 +020030 * Intel Linux Wireless <linuxwifi@intel.com>
Johannes Berg8ca151b2013-01-24 14:25:36 +010031 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
Andrei Otcheretianskifa7878e2015-05-05 09:28:16 +030035 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
Sara Sharon26d6c162017-01-03 12:00:19 +020037 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
Johannes Berg8ca151b2013-01-24 14:25:36 +010038 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67#include <net/mac80211.h>
68
69#include "mvm.h"
70#include "sta.h"
Eytan Lifshitz9ee718a2013-05-19 19:14:41 +030071#include "rs.h"
Johannes Berg8ca151b2013-01-24 14:25:36 +010072
Sara Sharon854c5702016-01-26 13:17:47 +020073/*
74 * New version of ADD_STA_sta command added new fields at the end of the
75 * structure, so sending the size of the relevant API's structure is enough to
76 * support both API versions.
77 */
78static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
79{
80 return iwl_mvm_has_new_rx_api(mvm) ?
81 sizeof(struct iwl_mvm_add_sta_cmd) :
82 sizeof(struct iwl_mvm_add_sta_cmd_v7);
83}
84
Eliad Pellerb92e6612014-01-23 17:58:23 +020085static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
86 enum nl80211_iftype iftype)
Johannes Berg8ca151b2013-01-24 14:25:36 +010087{
88 int sta_id;
Eliad Pellerb92e6612014-01-23 17:58:23 +020089 u32 reserved_ids = 0;
Johannes Berg8ca151b2013-01-24 14:25:36 +010090
Eliad Pellerb92e6612014-01-23 17:58:23 +020091 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
Johannes Berg8ca151b2013-01-24 14:25:36 +010092 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
93
94 lockdep_assert_held(&mvm->mutex);
95
Eliad Pellerb92e6612014-01-23 17:58:23 +020096 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
97 if (iftype != NL80211_IFTYPE_STATION)
98 reserved_ids = BIT(0);
99
Johannes Berg8ca151b2013-01-24 14:25:36 +0100100 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
Sara Sharon0ae98812017-01-04 14:53:58 +0200101 for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
Eliad Pellerb92e6612014-01-23 17:58:23 +0200102 if (BIT(sta_id) & reserved_ids)
103 continue;
104
Johannes Berg8ca151b2013-01-24 14:25:36 +0100105 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
106 lockdep_is_held(&mvm->mutex)))
107 return sta_id;
Eliad Pellerb92e6612014-01-23 17:58:23 +0200108 }
Sara Sharon0ae98812017-01-04 14:53:58 +0200109 return IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +0100110}
111
/*
 * iwl_mvm_sta_send_to_fw - send station add/update command to firmware
 *
 * Builds an ADD_STA command from mac80211's view of @sta (bandwidth, NSS,
 * SMPS mode, HT/VHT aggregation parameters, U-APSD configuration) and sends
 * it synchronously, checking the status word the firmware returns.
 *
 * @mvm:    mvm instance
 * @sta:    mac80211 station being added/updated
 * @update: true to modify an existing FW station, false to add a new one
 * @flags:  STA_MODIFY_* bits; only STA_MODIFY_QUEUES is acted upon here
 *
 * Returns 0 on success, -EIO if the firmware rejected the command, or the
 * error from sending the command itself.
 */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	/*
	 * Address and queue mask are only sent on initial add, or when the
	 * caller explicitly asks for a queue update.
	 */
	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		/* New TX API devices don't carry a TFD queue mask here */
		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	/*
	 * Fall-throughs are intentional: each wider bandwidth also enables
	 * all narrower FAT (wide-channel) flags below it.
	 */
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	/* Map the receive NSS onto the FW's SISO/MIMO2/MIMO3 flags */
	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	/* VHT A-MPDU exponent takes precedence over the HT ampdu factor */
	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->associated)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		/* duplicate the AC bits into the upper nibble of uapsd_acs */
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		/*
		 * NOTE(review): max_sp is doubled and 128 is used when
		 * max_sp == 0 — presumably "no SP length limit" in the FW
		 * API; confirm against the ADD_STA command definition.
		 */
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	/* Command was delivered; now check the FW's status reply */
	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
248
/*
 * Timer callback for an RX BA (block-ack aggregation) session.
 *
 * If frames were received recently enough (within twice the session
 * timeout), the timer is simply re-armed for the remaining interval.
 * Otherwise the session is considered expired and mac80211 is asked to
 * tear it down.  All state is accessed under rcu_read_lock.
 */
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	/* timeout == 0 means sessions never expire */
	if (!ba_data->timeout)
		goto unlock;

	/* Grace period is twice the negotiated timeout (in TUs) */
	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		/* Still active - re-arm for the remaining time */
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
281
/*
 * Allocate and enable one HW TX queue per AC for a TDLS station.
 *
 * Queues are picked from the non-aggregation range (below
 * mvm->first_agg_queue).  All four ACs must get a queue or the whole
 * allocation fails with -EBUSY; only then are the queues enabled and
 * recorded in the station's tfd_queue_msk.  Caller holds mvm->mutex.
 */
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		/* mark it taken so the next AC picks a different queue */
		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}
320
321static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
322 struct ieee80211_sta *sta)
323{
324 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
325 unsigned long sta_msk;
326 int i;
327
328 lockdep_assert_held(&mvm->mutex);
329
330 /* disable the TDLS STA-specific queues */
331 sta_msk = mvmsta->tfd_queue_msk;
Emmanuel Grumbacha4ca3ed2015-01-20 17:07:10 +0200332 for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
Arik Nemtsov06ecdba2015-10-12 14:47:11 +0300333 iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +0300334}
335
/*
 * Disable aggregations for a bitmap of TIDs for a given station
 *
 * Sends an ADD_STA modify command to the firmware, marking
 * @disable_agg_tids as TX-agg-disabled and optionally (@remove_queue)
 * telling the FW to remove @queue from the station's queues.
 * Not usable on new-TX-API devices.  Caller holds mvm->mutex.
 */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/* look up which station owns this queue */
	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	/* build the command while the station is still RCU-protected */
	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
389
/*
 * Return a bitmap of the TIDs mapped to @queue that currently have an
 * active TX aggregation session (state IWL_AGG_ON), or a negative errno
 * on lookup failure.  Not usable on new-TX-API devices.
 * Caller holds mvm->mutex.
 */
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	s8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/* snapshot the queue's owner and TID mapping */
	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* mvm->mutex protects the station table here, no RCU lock needed */
	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
426
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 *
 * Unmaps all TIDs of the owning station from @queue and clears the queue
 * from the station's tfd_queue_msk.  Returns the bitmap of TIDs that had
 * an active aggregation session (so the caller can disable them in FW),
 * or 0 if the owning station is gone.  Caller holds mvm->mutex.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/* snapshot the queue's owner and TID mapping */
	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* station already removed - nothing left to unmap */
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}
477
/*
 * Free a queue that was marked inactive so it can be reused.
 *
 * Unmaps the queue from its current owner, disables any aggregation
 * TIDs still attached, disables the HW queue, and - if the queue is
 * being handed to a different station (@same_sta == false) - tells the
 * FW the queue was removed from the previous owner.
 * Caller holds mvm->mutex.  Not usable on new-TX-API devices.
 */
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/* snapshot current AC/owner/TID of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
528
Liad Kaufman42db09c2016-05-02 14:01:14 +0300529static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
530 unsigned long tfd_queue_mask, u8 ac)
531{
532 int queue = 0;
533 u8 ac_to_queue[IEEE80211_NUM_ACS];
534 int i;
535
536 lockdep_assert_held(&mvm->queue_info_lock);
Sara Sharonbb497012016-09-29 14:52:40 +0300537 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
538 return -EINVAL;
Liad Kaufman42db09c2016-05-02 14:01:14 +0300539
540 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
541
542 /* See what ACs the existing queues for this STA have */
543 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
544 /* Only DATA queues can be shared */
545 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
546 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
547 continue;
548
Liad Kaufman9f9af3d2015-12-23 16:03:46 +0200549 /* Don't try and take queues being reconfigured */
550 if (mvm->queue_info[queue].status ==
551 IWL_MVM_QUEUE_RECONFIGURING)
552 continue;
553
Liad Kaufman42db09c2016-05-02 14:01:14 +0300554 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
555 }
556
557 /*
558 * The queue to share is chosen only from DATA queues as follows (in
559 * descending priority):
560 * 1. An AC_BE queue
561 * 2. Same AC queue
562 * 3. Highest AC queue that is lower than new AC
563 * 4. Any existing AC (there always is at least 1 DATA queue)
564 */
565
566 /* Priority 1: An AC_BE queue */
567 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
568 queue = ac_to_queue[IEEE80211_AC_BE];
569 /* Priority 2: Same AC queue */
570 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
571 queue = ac_to_queue[ac];
572 /* Priority 3a: If new AC is VO and VI exists - use VI */
573 else if (ac == IEEE80211_AC_VO &&
574 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
575 queue = ac_to_queue[IEEE80211_AC_VI];
576 /* Priority 3b: No BE so only AC less than the new one is BK */
577 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
578 queue = ac_to_queue[IEEE80211_AC_BK];
579 /* Priority 4a: No BE nor BK - use VI if exists */
580 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
581 queue = ac_to_queue[IEEE80211_AC_VI];
582 /* Priority 4b: No BE, BK nor VI - use VO if exists */
583 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
584 queue = ac_to_queue[IEEE80211_AC_VO];
585
586 /* Make sure queue found (or not) is legal */
Liad Kaufman9f9af3d2015-12-23 16:03:46 +0200587 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
588 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
589 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
Liad Kaufman42db09c2016-05-02 14:01:14 +0300590 IWL_ERR(mvm, "No DATA queues available to share\n");
Liad Kaufman9f9af3d2015-12-23 16:03:46 +0200591 return -ENOSPC;
592 }
593
594 /* Make sure the queue isn't in the middle of being reconfigured */
595 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
596 IWL_ERR(mvm,
597 "TXQ %d is in the middle of re-config - try again\n",
598 queue);
599 return -EBUSY;
Liad Kaufman42db09c2016-05-02 14:01:14 +0300600 }
601
602 return queue;
603}
604
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 *
 * Sequence: stop the mapped MAC queues, drain the HW queue, disable it in
 * both transport and SCD, re-enable it on the new AC's FIFO at @ssn, then
 * restart the MAC queues.  Caller must not hold mvm->queue_info_lock.
 * Not usable on new-TX-API devices.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	/* capture queue state needed for the disable command */
	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}
706
/*
 * Allocate a TX queue for @sta/@tid on new-TX-API (TVQM) devices.
 *
 * Asks the transport for a queue, then records it in the station's TID
 * data and queue mask, and marks it ready in the shared queue table.
 * Returns 0 on success or the negative error from queue allocation.
 * Caller holds mvm->mutex.
 */
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	/* record the new queue in the station's per-TID state */
	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	return 0;
}
741
Liad Kaufman24afba72015-07-28 18:56:08 +0300742static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
743 struct ieee80211_sta *sta, u8 ac, int tid,
744 struct ieee80211_hdr *hdr)
745{
746 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
747 struct iwl_trans_txq_scd_cfg cfg = {
748 .fifo = iwl_mvm_ac_to_tx_fifo[ac],
749 .sta_id = mvmsta->sta_id,
750 .tid = tid,
751 .frame_limit = IWL_FRAME_LIMIT,
752 };
753 unsigned int wdg_timeout =
754 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
755 u8 mac_queue = mvmsta->vif->hw_queue[ac];
756 int queue = -1;
Sara Sharon01796ff2016-11-16 17:04:36 +0200757 bool using_inactive_queue = false, same_sta = false;
Liad Kaufman9794c642015-08-19 17:34:28 +0300758 unsigned long disable_agg_tids = 0;
759 enum iwl_mvm_agg_state queue_state;
Liad Kaufman42db09c2016-05-02 14:01:14 +0300760 bool shared_queue = false;
Liad Kaufman24afba72015-07-28 18:56:08 +0300761 int ssn;
Liad Kaufman42db09c2016-05-02 14:01:14 +0300762 unsigned long tfd_queue_mask;
Liad Kaufmancf961e12015-08-13 19:16:08 +0300763 int ret;
Liad Kaufman24afba72015-07-28 18:56:08 +0300764
765 lockdep_assert_held(&mvm->mutex);
766
Sara Sharon310181e2017-01-17 14:27:48 +0200767 if (iwl_mvm_has_new_tx_api(mvm))
768 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
769
Liad Kaufman42db09c2016-05-02 14:01:14 +0300770 spin_lock_bh(&mvmsta->lock);
771 tfd_queue_mask = mvmsta->tfd_queue_msk;
772 spin_unlock_bh(&mvmsta->lock);
773
Liad Kaufmand2515a92016-03-23 16:31:08 +0200774 spin_lock_bh(&mvm->queue_info_lock);
Liad Kaufman24afba72015-07-28 18:56:08 +0300775
776 /*
777 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
778 * exists
779 */
780 if (!ieee80211_is_data_qos(hdr->frame_control) ||
781 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
Liad Kaufman9794c642015-08-19 17:34:28 +0300782 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
783 IWL_MVM_DQA_MIN_MGMT_QUEUE,
Liad Kaufman24afba72015-07-28 18:56:08 +0300784 IWL_MVM_DQA_MAX_MGMT_QUEUE);
785 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
786 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
787 queue);
788
789 /* If no such queue is found, we'll use a DATA queue instead */
790 }
791
Liad Kaufman9794c642015-08-19 17:34:28 +0300792 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
793 (mvm->queue_info[mvmsta->reserved_queue].status ==
794 IWL_MVM_QUEUE_RESERVED ||
795 mvm->queue_info[mvmsta->reserved_queue].status ==
796 IWL_MVM_QUEUE_INACTIVE)) {
Liad Kaufman24afba72015-07-28 18:56:08 +0300797 queue = mvmsta->reserved_queue;
Liad Kaufman9794c642015-08-19 17:34:28 +0300798 mvm->queue_info[queue].reserved = true;
Liad Kaufman24afba72015-07-28 18:56:08 +0300799 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
800 }
801
802 if (queue < 0)
Liad Kaufman9794c642015-08-19 17:34:28 +0300803 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
804 IWL_MVM_DQA_MIN_DATA_QUEUE,
Liad Kaufman24afba72015-07-28 18:56:08 +0300805 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufmancf961e12015-08-13 19:16:08 +0300806
807 /*
Liad Kaufman9794c642015-08-19 17:34:28 +0300808 * Check if this queue is already allocated but inactive.
809 * In such a case, we'll need to first free this queue before enabling
810 * it again, so we'll mark it as reserved to make sure no new traffic
811 * arrives on it
812 */
813 if (queue > 0 &&
814 mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
815 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
816 using_inactive_queue = true;
Sara Sharon01796ff2016-11-16 17:04:36 +0200817 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
Liad Kaufman9794c642015-08-19 17:34:28 +0300818 IWL_DEBUG_TX_QUEUES(mvm,
819 "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
820 queue, mvmsta->sta_id, tid);
821 }
822
Liad Kaufman42db09c2016-05-02 14:01:14 +0300823 /* No free queue - we'll have to share */
824 if (queue <= 0) {
825 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
826 if (queue > 0) {
827 shared_queue = true;
828 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
829 }
830 }
831
Liad Kaufman9794c642015-08-19 17:34:28 +0300832 /*
Liad Kaufmancf961e12015-08-13 19:16:08 +0300833 * Mark TXQ as ready, even though it hasn't been fully configured yet,
834 * to make sure no one else takes it.
835 * This will allow avoiding re-acquiring the lock at the end of the
836 * configuration. On error we'll mark it back as free.
837 */
Liad Kaufman42db09c2016-05-02 14:01:14 +0300838 if ((queue > 0) && !shared_queue)
Liad Kaufmancf961e12015-08-13 19:16:08 +0300839 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
Liad Kaufman24afba72015-07-28 18:56:08 +0300840
Liad Kaufmand2515a92016-03-23 16:31:08 +0200841 spin_unlock_bh(&mvm->queue_info_lock);
Liad Kaufman24afba72015-07-28 18:56:08 +0300842
Liad Kaufman42db09c2016-05-02 14:01:14 +0300843 /* This shouldn't happen - out of queues */
844 if (WARN_ON(queue <= 0)) {
845 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
846 tid, cfg.sta_id);
Liad Kaufman9f9af3d2015-12-23 16:03:46 +0200847 return queue;
Liad Kaufman42db09c2016-05-02 14:01:14 +0300848 }
Liad Kaufman24afba72015-07-28 18:56:08 +0300849
850 /*
851 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
852 * but for configuring the SCD to send A-MPDUs we need to mark the queue
853 * as aggregatable.
854 * Mark all DATA queues as allowing to be aggregated at some point
855 */
Liad Kaufmand5216a22015-08-09 15:50:51 +0300856 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
857 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
Liad Kaufman24afba72015-07-28 18:56:08 +0300858
Liad Kaufman9794c642015-08-19 17:34:28 +0300859 /*
860 * If this queue was previously inactive (idle) - we need to free it
861 * first
862 */
863 if (using_inactive_queue) {
Sara Sharon01796ff2016-11-16 17:04:36 +0200864 ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
865 if (ret)
Liad Kaufman9794c642015-08-19 17:34:28 +0300866 return ret;
Liad Kaufman9794c642015-08-19 17:34:28 +0300867 }
868
Liad Kaufman42db09c2016-05-02 14:01:14 +0300869 IWL_DEBUG_TX_QUEUES(mvm,
870 "Allocating %squeue #%d to sta %d on tid %d\n",
871 shared_queue ? "shared " : "", queue,
872 mvmsta->sta_id, tid);
873
874 if (shared_queue) {
875 /* Disable any open aggs on this queue */
876 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
877
878 if (disable_agg_tids) {
879 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
880 queue);
881 iwl_mvm_invalidate_sta_queue(mvm, queue,
882 disable_agg_tids, false);
883 }
Liad Kaufman42db09c2016-05-02 14:01:14 +0300884 }
Liad Kaufman24afba72015-07-28 18:56:08 +0300885
886 ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
887 iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
888 wdg_timeout);
889
Liad Kaufman58f2cc52015-09-30 16:44:28 +0200890 /*
891 * Mark queue as shared in transport if shared
892 * Note this has to be done after queue enablement because enablement
893 * can also set this value, and there is no indication there to shared
894 * queues
895 */
896 if (shared_queue)
897 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
898
Liad Kaufman24afba72015-07-28 18:56:08 +0300899 spin_lock_bh(&mvmsta->lock);
900 mvmsta->tid_data[tid].txq_id = queue;
Liad Kaufman9794c642015-08-19 17:34:28 +0300901 mvmsta->tid_data[tid].is_tid_active = true;
Liad Kaufman24afba72015-07-28 18:56:08 +0300902 mvmsta->tfd_queue_msk |= BIT(queue);
Liad Kaufman9794c642015-08-19 17:34:28 +0300903 queue_state = mvmsta->tid_data[tid].state;
Liad Kaufman24afba72015-07-28 18:56:08 +0300904
905 if (mvmsta->reserved_queue == queue)
906 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
907 spin_unlock_bh(&mvmsta->lock);
908
Liad Kaufman42db09c2016-05-02 14:01:14 +0300909 if (!shared_queue) {
910 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
911 if (ret)
912 goto out_err;
Liad Kaufmancf961e12015-08-13 19:16:08 +0300913
Liad Kaufman42db09c2016-05-02 14:01:14 +0300914 /* If we need to re-enable aggregations... */
915 if (queue_state == IWL_AGG_ON) {
916 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
917 if (ret)
918 goto out_err;
919 }
Liad Kaufman58f2cc52015-09-30 16:44:28 +0200920 } else {
921 /* Redirect queue, if needed */
922 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
923 wdg_timeout, false);
924 if (ret)
925 goto out_err;
Liad Kaufman42db09c2016-05-02 14:01:14 +0300926 }
Liad Kaufman9794c642015-08-19 17:34:28 +0300927
Liad Kaufman42db09c2016-05-02 14:01:14 +0300928 return 0;
Liad Kaufmancf961e12015-08-13 19:16:08 +0300929
930out_err:
931 iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
932
933 return ret;
Liad Kaufman24afba72015-07-28 18:56:08 +0300934}
935
/*
 * Re-assign TXQ ownership to one of the TIDs still mapped to the queue.
 *
 * A shared queue is "owned" by a single TID (queue_info[queue].txq_tid).
 * When the owning TID is removed from the queue, the firmware must be told
 * (via SCD_QUEUE_CFG with SCD_CFG_UPDATE_QUEUE_TID) that another of the
 * remaining TIDs now owns it.  Must be called with mvm->mutex held.
 */
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* SCD queue configuration is a-000/9000-series (non-TVQM) concept */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	/* Snapshot the TID bitmap under the lock; cmd is sent lock-free */
	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	/* Record the new owner only after the firmware accepted the change */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}
976
/*
 * Turn a previously-shared TXQ back into a single-TID queue.
 *
 * Called (from the add-stream worker) for queues whose status is
 * IWL_MVM_QUEUE_RECONFIGURING.  The queue must have exactly one TID left
 * on it; the queue is redirected for that TID, aggregation is re-enabled
 * if it was on, and the queue is marked READY again.
 * Must be called with mvm->mutex held.
 */
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	s8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	/* Snapshot queue state; the redirect below must run without the lock */
	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		/* Async: re-enabling aggs is best-effort at this point */
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}
1058
Liad Kaufman24afba72015-07-28 18:56:08 +03001059static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1060{
1061 if (tid == IWL_MAX_TID_COUNT)
1062 return IEEE80211_AC_VO; /* MGMT */
1063
1064 return tid_to_mac80211_ac[tid];
1065}
1066
/*
 * Flush the frames that were deferred for @sta/@tid while no TXQ was
 * allocated: allocate a queue if needed, then transmit (or drop) the
 * deferred skbs and wake the corresponding mac80211 queue.
 * Must be called with mvm->mutex held.
 */
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	/* Nothing deferred - nothing to do */
	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	/* Allocate a TXQ based on the first deferred frame's header */
	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	/* Take ownership of the deferred frames under the station lock */
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	/* TX each frame; free it on queue failure or if TX itself fails */
	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
1119
/*
 * Worker for DQA stream management: reconfigures queues that need it
 * (unsharing / ownership change) and then flushes deferred traffic for
 * every station that accumulated frames while waiting for a queue.
 */
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
		bool reconfig;
		bool change_owner;

		/* Evaluate queue state under the lock, act on it outside */
		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		/* Station may have been removed meanwhile */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}
1182
1183static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001184 struct ieee80211_sta *sta,
1185 enum nl80211_iftype vif_type)
Liad Kaufman24afba72015-07-28 18:56:08 +03001186{
1187 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1188 int queue;
Sara Sharon01796ff2016-11-16 17:04:36 +02001189 bool using_inactive_queue = false, same_sta = false;
Liad Kaufman24afba72015-07-28 18:56:08 +03001190
Liad Kaufman9794c642015-08-19 17:34:28 +03001191 /*
1192 * Check for inactive queues, so we don't reach a situation where we
1193 * can't add a STA due to a shortage in queues that doesn't really exist
1194 */
1195 iwl_mvm_inactivity_check(mvm);
1196
Liad Kaufman24afba72015-07-28 18:56:08 +03001197 spin_lock_bh(&mvm->queue_info_lock);
1198
1199 /* Make sure we have free resources for this STA */
Liad Kaufmand5216a22015-08-09 15:50:51 +03001200 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1201 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
Liad Kaufmancf961e12015-08-13 19:16:08 +03001202 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1203 IWL_MVM_QUEUE_FREE))
Liad Kaufmand5216a22015-08-09 15:50:51 +03001204 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1205 else
Liad Kaufman9794c642015-08-19 17:34:28 +03001206 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1207 IWL_MVM_DQA_MIN_DATA_QUEUE,
Liad Kaufmand5216a22015-08-09 15:50:51 +03001208 IWL_MVM_DQA_MAX_DATA_QUEUE);
Liad Kaufman24afba72015-07-28 18:56:08 +03001209 if (queue < 0) {
1210 spin_unlock_bh(&mvm->queue_info_lock);
1211 IWL_ERR(mvm, "No available queues for new station\n");
1212 return -ENOSPC;
Sara Sharon01796ff2016-11-16 17:04:36 +02001213 } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
1214 /*
1215 * If this queue is already allocated but inactive we'll need to
1216 * first free this queue before enabling it again, we'll mark
1217 * it as reserved to make sure no new traffic arrives on it
1218 */
1219 using_inactive_queue = true;
1220 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
Liad Kaufman24afba72015-07-28 18:56:08 +03001221 }
Liad Kaufmancf961e12015-08-13 19:16:08 +03001222 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
Liad Kaufman24afba72015-07-28 18:56:08 +03001223
1224 spin_unlock_bh(&mvm->queue_info_lock);
1225
1226 mvmsta->reserved_queue = queue;
1227
Sara Sharon01796ff2016-11-16 17:04:36 +02001228 if (using_inactive_queue)
1229 iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
1230
Liad Kaufman24afba72015-07-28 18:56:08 +03001231 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1232 queue, mvmsta->sta_id);
1233
1234 return 0;
1235}
1236
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/*
	 * NOTE(review): mvm->queue_info is touched here without taking
	 * queue_info_lock - presumably safe because this only runs during
	 * HW restart before TX traffic resumes; confirm.
	 */
	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	/* Re-map every TID that had a queue before the restart */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			/* TVQM: firmware assigns the queue number */
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
			/* Data queues and the BSS client queue may aggregate */
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
		}

		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
	}

	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}
1304
/*
 * Add a station to the driver and firmware.
 *
 * On a normal add: picks a free station id, initializes the per-station
 * state, allocates/reserves TX queues (scheme depends on DQA support) and
 * sends the station to the firmware.  On HW restart the existing station
 * id and queue layout are reused and only re-registered with the firmware.
 * Must be called with mvm->mutex held.
 *
 * Returns 0 on success, -ENOSPC / -ENOMEM / firmware errors on failure.
 */
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	/* On HW restart keep the station id assigned before the restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/*
	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
	 * and then they'll be allocated dynamically
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		/* Non-DQA: use the vif's per-AC hardware queues directly */
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	/* Per-RX-queue duplicate-detection data (new RX API only) */
	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	/*
	 * NOTE(review): mvm_sta->dup_data allocated above is not freed on
	 * this error path - looks like a potential leak if the caller never
	 * reaches iwl_mvm_rm_sta for this station; confirm cleanup semantics.
	 */
	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
		iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}
1420
1421int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1422 bool drain)
1423{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001424 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01001425 int ret;
1426 u32 status;
1427
1428 lockdep_assert_held(&mvm->mutex);
1429
1430 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1431 cmd.sta_id = mvmsta->sta_id;
1432 cmd.add_modify = STA_MODE_MODIFY;
1433 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1434 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1435
1436 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02001437 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1438 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001439 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001440 if (ret)
1441 return ret;
1442
Sara Sharon837c4da2016-01-07 16:50:45 +02001443 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001444 case ADD_STA_SUCCESS:
1445 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
1446 mvmsta->sta_id);
1447 break;
1448 default:
1449 ret = -EIO;
1450 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1451 mvmsta->sta_id);
1452 break;
1453 }
1454
1455 return ret;
1456}
1457
1458/*
1459 * Remove a station from the FW table. Before sending the command to remove
1460 * the station validate that the station is indeed known to the driver (sanity
1461 * only).
1462 */
1463static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1464{
1465 struct ieee80211_sta *sta;
1466 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1467 .sta_id = sta_id,
1468 };
1469 int ret;
1470
1471 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1472 lockdep_is_held(&mvm->mutex));
1473
1474 /* Note: internal stations are marked as error values */
1475 if (!sta) {
1476 IWL_ERR(mvm, "Invalid station id\n");
1477 return -EINVAL;
1478 }
1479
Emmanuel Grumbacha1022922014-05-12 11:36:41 +03001480 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
Johannes Berg8ca151b2013-01-24 14:25:36 +01001481 sizeof(rm_sta_cmd), &rm_sta_cmd);
1482 if (ret) {
1483 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1484 return ret;
1485 }
1486
1487 return 0;
1488}
1489
/*
 * Worker that finishes removing stations whose frames have been drained.
 *
 * Station slots in fw_id_to_mac_id hold error-pointer sentinels while in
 * transition: -EBUSY means "removed, waiting for drain" and is the only
 * state this worker acts on; -ENOENT and -EINVAL are skipped (see below).
 * For drained TDLS stations the queues recorded in tfd_drained are also
 * disabled here.
 */
void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * that later.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		/*
		 * Defensive only: a NULL pointer already hit the !IS_ERR()
		 * continue above, so this branch cannot be reached.
		 */
		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		/* TDLS stations recorded their queues for disabling here */
		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}
1562
Liad Kaufman24afba72015-07-28 18:56:08 +03001563static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1564 struct ieee80211_vif *vif,
1565 struct iwl_mvm_sta *mvm_sta)
1566{
1567 int ac;
1568 int i;
1569
1570 lockdep_assert_held(&mvm->mutex);
1571
1572 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1573 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
1574 continue;
1575
1576 ac = iwl_mvm_tid_to_ac_queue(i);
1577 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1578 vif->hw_queue[ac], i, 0);
1579 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
1580 }
1581}
1582
Johannes Berg8ca151b2013-01-24 14:25:36 +01001583int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1584 struct ieee80211_vif *vif,
1585 struct ieee80211_sta *sta)
1586{
1587 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01001588 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001589 u8 sta_id = mvm_sta->sta_id;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001590 int ret;
1591
1592 lockdep_assert_held(&mvm->mutex);
1593
Sara Sharona571f5f2015-12-07 12:50:58 +02001594 if (iwl_mvm_has_new_rx_api(mvm))
1595 kfree(mvm_sta->dup_data);
1596
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001597 if ((vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon94c3e612016-12-07 15:04:37 +02001598 mvmvif->ap_sta_id == sta_id) ||
Liad Kaufmana6f035a2015-08-24 15:23:14 +03001599 iwl_mvm_is_dqa_supported(mvm)){
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001600 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1601 if (ret)
1602 return ret;
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001603 /* flush its queues here since we are freeing mvm_sta */
Luca Coelho5888a402015-10-06 09:54:57 +03001604 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
Emmanuel Grumbachfe92e322015-03-11 09:34:31 +02001605 if (ret)
1606 return ret;
1607 ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1608 mvm_sta->tfd_queue_msk);
1609 if (ret)
1610 return ret;
1611 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
Emmanuel Grumbach80d85652013-02-19 15:32:42 +02001612
Liad Kaufman24afba72015-07-28 18:56:08 +03001613 /* If DQA is supported - the queues can be disabled now */
Sara Sharon94c3e612016-12-07 15:04:37 +02001614 if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman56214742016-09-22 15:14:08 +03001615 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
Sara Sharon94c3e612016-12-07 15:04:37 +02001616 /*
1617 * If pending_frames is set at this point - it must be
1618 * driver internal logic error, since queues are empty
1619 * and removed successuly.
1620 * warn on it but set it to 0 anyway to avoid station
1621 * not being removed later in the function
1622 */
1623 WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
1624 }
Liad Kaufman56214742016-09-22 15:14:08 +03001625
1626 /* If there is a TXQ still marked as reserved - free it */
1627 if (iwl_mvm_is_dqa_supported(mvm) &&
1628 mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001629 u8 reserved_txq = mvm_sta->reserved_queue;
1630 enum iwl_mvm_queue_status *status;
1631
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001632 /*
1633 * If no traffic has gone through the reserved TXQ - it
1634 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1635 * should be manually marked as free again
1636 */
1637 spin_lock_bh(&mvm->queue_info_lock);
1638 status = &mvm->queue_info[reserved_txq].status;
1639 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1640 (*status != IWL_MVM_QUEUE_FREE),
1641 "sta_id %d reserved txq %d status %d",
Sara Sharon94c3e612016-12-07 15:04:37 +02001642 sta_id, reserved_txq, *status)) {
Liad Kaufmana0315dea2016-07-07 13:25:59 +03001643 spin_unlock_bh(&mvm->queue_info_lock);
1644 return -EINVAL;
1645 }
1646
1647 *status = IWL_MVM_QUEUE_FREE;
1648 spin_unlock_bh(&mvm->queue_info_lock);
1649 }
1650
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001651 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon94c3e612016-12-07 15:04:37 +02001652 mvmvif->ap_sta_id == sta_id) {
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001653 /* if associated - we can't remove the AP STA now */
1654 if (vif->bss_conf.assoc)
1655 return ret;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001656
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001657 /* unassoc - go ahead - remove the AP STA now */
Sara Sharon0ae98812017-01-04 14:53:58 +02001658 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
Eliad Peller37577fe2013-12-05 17:19:39 +02001659
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001660 /* clear d0i3_ap_sta_id if no longer relevant */
Sara Sharon94c3e612016-12-07 15:04:37 +02001661 if (mvm->d0i3_ap_sta_id == sta_id)
Sara Sharon0ae98812017-01-04 14:53:58 +02001662 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001663 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001664 }
1665
1666 /*
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001667 * This shouldn't happen - the TDLS channel switch should be canceled
1668 * before the STA is removed.
1669 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001670 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
Sara Sharon0ae98812017-01-04 14:53:58 +02001671 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
Arik Nemtsov1d3c3f62014-10-23 18:03:10 +03001672 cancel_delayed_work(&mvm->tdls_cs.dwork);
1673 }
1674
1675 /*
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001676 * Make sure that the tx response code sees the station as -EBUSY and
1677 * calls the drain worker.
1678 */
1679 spin_lock_bh(&mvm_sta->lock);
Sara Sharon94c3e612016-12-07 15:04:37 +02001680
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001681 /*
Johannes Berg8ca151b2013-01-24 14:25:36 +01001682 * There are frames pending on the AC queues for this station.
1683 * We need to wait until all the frames are drained...
1684 */
Sara Sharon94c3e612016-12-07 15:04:37 +02001685 if (atomic_read(&mvm->pending_frames[sta_id])) {
1686 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
Johannes Berg8ca151b2013-01-24 14:25:36 +01001687 ERR_PTR(-EBUSY));
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001688 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001689
1690 /* disable TDLS sta queues on drain complete */
1691 if (sta->tdls) {
Sara Sharon94c3e612016-12-07 15:04:37 +02001692 mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
1693 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001694 }
1695
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001696 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001697 } else {
Emmanuel Grumbache3d4bc82013-05-07 14:08:24 +03001698 spin_unlock_bh(&mvm_sta->lock);
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001699
Liad Kaufmane3118ad2016-06-05 10:49:02 +03001700 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
Arik Nemtsova0f6bf22014-09-21 19:10:04 +03001701 iwl_mvm_tdls_sta_deinit(mvm, sta);
1702
Johannes Berg8ca151b2013-01-24 14:25:36 +01001703 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
Monam Agarwalc531c772014-03-24 00:05:56 +05301704 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001705 }
1706
1707 return ret;
1708}
1709
1710int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1711 struct ieee80211_vif *vif,
1712 u8 sta_id)
1713{
1714 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1715
1716 lockdep_assert_held(&mvm->mutex);
1717
Monam Agarwalc531c772014-03-24 00:05:56 +05301718 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001719 return ret;
1720}
1721
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001722int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1723 struct iwl_mvm_int_sta *sta,
1724 u32 qmask, enum nl80211_iftype iftype)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001725{
1726 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
Eliad Pellerb92e6612014-01-23 17:58:23 +02001727 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
Sara Sharon0ae98812017-01-04 14:53:58 +02001728 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001729 return -ENOSPC;
1730 }
1731
1732 sta->tfd_queue_msk = qmask;
1733
1734 /* put a non-NULL value so iterating over the stations won't stop */
1735 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1736 return 0;
1737}
1738
Sara Sharon26d6c162017-01-03 12:00:19 +02001739void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001740{
Monam Agarwalc531c772014-03-24 00:05:56 +05301741 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001742 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
Sara Sharon0ae98812017-01-04 14:53:58 +02001743 sta->sta_id = IWL_MVM_INVALID_STA;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001744}
1745
1746static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1747 struct iwl_mvm_int_sta *sta,
1748 const u8 *addr,
1749 u16 mac_id, u16 color)
1750{
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001751 struct iwl_mvm_add_sta_cmd cmd;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001752 int ret;
1753 u32 status;
1754
1755 lockdep_assert_held(&mvm->mutex);
1756
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001757 memset(&cmd, 0, sizeof(cmd));
Johannes Berg8ca151b2013-01-24 14:25:36 +01001758 cmd.sta_id = sta->sta_id;
1759 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1760 color));
1761
Sara Sharonbb497012016-09-29 14:52:40 +03001762 if (!iwl_mvm_has_new_tx_api(mvm))
1763 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
Liad Kaufmancf0cda12015-09-24 10:44:12 +02001764 cmd.tid_disable_tx = cpu_to_le16(0xffff);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001765
1766 if (addr)
1767 memcpy(cmd.addr, addr, ETH_ALEN);
1768
Sara Sharon854c5702016-01-26 13:17:47 +02001769 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1770 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03001771 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01001772 if (ret)
1773 return ret;
1774
Sara Sharon837c4da2016-01-07 16:50:45 +02001775 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01001776 case ADD_STA_SUCCESS:
1777 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1778 return 0;
1779 default:
1780 ret = -EIO;
1781 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1782 status);
1783 break;
1784 }
1785 return ret;
1786}
1787
Sara Sharonc5a719e2016-11-15 10:20:48 +02001788static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001789{
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001790 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1791 mvm->cfg->base_params->wd_timeout :
1792 IWL_WATCHDOG_DISABLED;
Johannes Berg8ca151b2013-01-24 14:25:36 +01001793
Sara Sharon310181e2017-01-17 14:27:48 +02001794 if (iwl_mvm_has_new_tx_api(mvm)) {
1795 int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
1796 mvm->aux_sta.sta_id,
1797 IWL_MAX_TID_COUNT,
1798 wdg_timeout);
1799 mvm->aux_queue = queue;
1800 } else if (iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufman28d07932015-09-01 16:36:25 +03001801 struct iwl_trans_txq_scd_cfg cfg = {
1802 .fifo = IWL_MVM_TX_FIFO_MCAST,
1803 .sta_id = mvm->aux_sta.sta_id,
1804 .tid = IWL_MAX_TID_COUNT,
1805 .aggregate = false,
1806 .frame_limit = IWL_FRAME_LIMIT,
1807 };
1808
1809 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1810 wdg_timeout);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001811 } else {
1812 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1813 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
Liad Kaufman28d07932015-09-01 16:36:25 +03001814 }
Sara Sharonc5a719e2016-11-15 10:20:48 +02001815}
1816
/*
 * Allocate the auxiliary internal station, enable its queue and add it
 * to the firmware. The queue-enable/ADD_STA ordering depends on the
 * firmware generation (see comments below).
 *
 * Returns 0 on success or a negative error code; on ADD_STA failure the
 * allocated internal station is freed again.
 */
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_queue(mvm);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		/* roll back the allocation done above */
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For a000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_queue(mvm);

	return 0;
}
1849
Chaya Rachel Ivgi0e39eb02015-12-03 15:51:46 +02001850int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1851{
1852 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1853
1854 lockdep_assert_held(&mvm->mutex);
1855 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1856 mvmvif->id, 0);
1857}
1858
/*
 * Remove the sniffer station from the firmware. The local data is
 * freed separately via iwl_mvm_dealloc_snif_sta().
 */
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
1871
/* Free the local data of the sniffer station (no firmware command). */
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
1876
Johannes Berg712b24a2014-08-04 14:14:14 +02001877void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1878{
1879 lockdep_assert_held(&mvm->mutex);
1880
1881 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1882}
1883
Johannes Berg8ca151b2013-01-24 14:25:36 +01001884/*
1885 * Send the add station command for the vif's broadcast station.
1886 * Assumes that the station was already allocated.
1887 *
1888 * @mvm: the mvm component
1889 * @vif: the interface to which the broadcast station is added
1890 * @bsta: the broadcast station to add.
1891 */
Johannes Berg013290a2014-08-04 13:38:48 +02001892int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01001893{
1894 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02001895 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg5023d962013-07-31 14:07:43 +02001896 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
Johannes Berga4243402014-01-20 23:46:38 +01001897 const u8 *baddr = _baddr;
Johannes Berg7daa7622017-02-24 12:02:22 +01001898 int queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001899 int ret;
Sara Sharonc5a719e2016-11-15 10:20:48 +02001900 unsigned int wdg_timeout =
1901 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1902 struct iwl_trans_txq_scd_cfg cfg = {
1903 .fifo = IWL_MVM_TX_FIFO_VO,
1904 .sta_id = mvmvif->bcast_sta.sta_id,
1905 .tid = IWL_MAX_TID_COUNT,
1906 .aggregate = false,
1907 .frame_limit = IWL_FRAME_LIMIT,
1908 };
Johannes Berg8ca151b2013-01-24 14:25:36 +01001909
1910 lockdep_assert_held(&mvm->mutex);
1911
Sara Sharon310181e2017-01-17 14:27:48 +02001912 if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
Liad Kaufman4d339982017-03-21 17:13:16 +02001913 if (vif->type == NL80211_IFTYPE_AP ||
1914 vif->type == NL80211_IFTYPE_ADHOC)
Sara Sharon49f71712017-01-09 12:07:16 +02001915 queue = mvm->probe_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001916 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
Sara Sharon49f71712017-01-09 12:07:16 +02001917 queue = mvm->p2p_dev_queue;
Liad Kaufmandf88c082016-11-24 15:31:00 +02001918 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
Liad Kaufmande24f632015-08-04 15:19:18 +03001919 return -EINVAL;
1920
Liad Kaufmandf88c082016-11-24 15:31:00 +02001921 bsta->tfd_queue_msk |= BIT(queue);
Sara Sharonc5a719e2016-11-15 10:20:48 +02001922
Sara Sharon310181e2017-01-17 14:27:48 +02001923 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
1924 &cfg, wdg_timeout);
Liad Kaufmande24f632015-08-04 15:19:18 +03001925 }
1926
Johannes Berg5023d962013-07-31 14:07:43 +02001927 if (vif->type == NL80211_IFTYPE_ADHOC)
1928 baddr = vif->bss_conf.bssid;
1929
Sara Sharon0ae98812017-01-04 14:53:58 +02001930 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
Johannes Berg8ca151b2013-01-24 14:25:36 +01001931 return -ENOSPC;
1932
Liad Kaufmandf88c082016-11-24 15:31:00 +02001933 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1934 mvmvif->id, mvmvif->color);
1935 if (ret)
1936 return ret;
1937
1938 /*
Sara Sharonc5a719e2016-11-15 10:20:48 +02001939 * For a000 firmware and on we cannot add queue to a station unknown
1940 * to firmware so enable queue here - after the station was added
Liad Kaufmandf88c082016-11-24 15:31:00 +02001941 */
Sara Sharon310181e2017-01-17 14:27:48 +02001942 if (iwl_mvm_has_new_tx_api(mvm)) {
Johannes Berg7daa7622017-02-24 12:02:22 +01001943 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
1944 bsta->sta_id,
1945 IWL_MAX_TID_COUNT,
1946 wdg_timeout);
1947
Sara Sharon310181e2017-01-17 14:27:48 +02001948 if (vif->type == NL80211_IFTYPE_AP)
1949 mvm->probe_queue = queue;
1950 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1951 mvm->p2p_dev_queue = queue;
1952
1953 bsta->tfd_queue_msk |= BIT(queue);
1954 }
Liad Kaufmandf88c082016-11-24 15:31:00 +02001955
1956 return 0;
1957}
1958
1959static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1960 struct ieee80211_vif *vif)
1961{
1962 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1963
1964 lockdep_assert_held(&mvm->mutex);
1965
Liad Kaufman4d339982017-03-21 17:13:16 +02001966 if (vif->type == NL80211_IFTYPE_AP ||
1967 vif->type == NL80211_IFTYPE_ADHOC)
Liad Kaufmandf88c082016-11-24 15:31:00 +02001968 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
1969 IWL_MAX_TID_COUNT, 0);
1970
Sara Sharon49f71712017-01-09 12:07:16 +02001971 if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
1972 iwl_mvm_disable_txq(mvm, mvm->probe_queue,
Liad Kaufmandf88c082016-11-24 15:31:00 +02001973 vif->hw_queue[0], IWL_MAX_TID_COUNT,
1974 0);
Sara Sharon49f71712017-01-09 12:07:16 +02001975 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
Liad Kaufmandf88c082016-11-24 15:31:00 +02001976 }
1977
Sara Sharon49f71712017-01-09 12:07:16 +02001978 if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
1979 iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
Liad Kaufmandf88c082016-11-24 15:31:00 +02001980 vif->hw_queue[0], IWL_MAX_TID_COUNT,
1981 0);
Sara Sharon49f71712017-01-09 12:07:16 +02001982 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
Liad Kaufmandf88c082016-11-24 15:31:00 +02001983 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01001984}
1985
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* in DQA mode the broadcast station's queues must be freed too */
	if (iwl_mvm_is_dqa_supported(mvm))
		iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
2003
Johannes Berg013290a2014-08-04 13:38:48 +02002004int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2005{
2006 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Liad Kaufmande24f632015-08-04 15:19:18 +03002007 u32 qmask = 0;
Johannes Berg013290a2014-08-04 13:38:48 +02002008
2009 lockdep_assert_held(&mvm->mutex);
2010
Liad Kaufmandf88c082016-11-24 15:31:00 +02002011 if (!iwl_mvm_is_dqa_supported(mvm)) {
Liad Kaufmande24f632015-08-04 15:19:18 +03002012 qmask = iwl_mvm_mac_get_queues_mask(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002013
Liad Kaufmande24f632015-08-04 15:19:18 +03002014 /*
2015 * The firmware defines the TFD queue mask to only be relevant
2016 * for *unicast* queues, so the multicast (CAB) queue shouldn't
Liad Kaufmandf88c082016-11-24 15:31:00 +02002017 * be included. This only happens in NL80211_IFTYPE_AP vif type,
2018 * so the next line will only have an effect there.
Liad Kaufmande24f632015-08-04 15:19:18 +03002019 */
Johannes Berg013290a2014-08-04 13:38:48 +02002020 qmask &= ~BIT(vif->cab_queue);
Liad Kaufmande24f632015-08-04 15:19:18 +03002021 }
2022
Johannes Berg013290a2014-08-04 13:38:48 +02002023 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
2024 ieee80211_vif_type_p2p(vif));
2025}
2026
Johannes Berg8ca151b2013-01-24 14:25:36 +01002027/* Allocate a new station entry for the broadcast station to the given vif,
2028 * and send it to the FW.
2029 * Note that each P2P mac should have its own broadcast station.
2030 *
2031 * @mvm: the mvm component
2032 * @vif: the interface to which the broadcast station is added
2033 * @bsta: the broadcast station to add. */
Johannes Berg013290a2014-08-04 13:38:48 +02002034int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002035{
2036 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg013290a2014-08-04 13:38:48 +02002037 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002038 int ret;
2039
2040 lockdep_assert_held(&mvm->mutex);
2041
Johannes Berg013290a2014-08-04 13:38:48 +02002042 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002043 if (ret)
2044 return ret;
2045
Johannes Berg013290a2014-08-04 13:38:48 +02002046 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002047
2048 if (ret)
2049 iwl_mvm_dealloc_int_sta(mvm, bsta);
Johannes Berg013290a2014-08-04 13:38:48 +02002050
Johannes Berg8ca151b2013-01-24 14:25:36 +01002051 return ret;
2052}
2053
Johannes Berg013290a2014-08-04 13:38:48 +02002054void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2055{
2056 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2057
2058 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2059}
2060
Johannes Berg8ca151b2013-01-24 14:25:36 +01002061/*
2062 * Send the FW a request to remove the station from it's internal data
2063 * structures, and in addition remove it from the local data structure.
2064 */
Johannes Berg013290a2014-08-04 13:38:48 +02002065int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002066{
2067 int ret;
2068
2069 lockdep_assert_held(&mvm->mutex);
2070
Johannes Berg013290a2014-08-04 13:38:48 +02002071 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002072
Johannes Berg013290a2014-08-04 13:38:48 +02002073 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2074
Johannes Berg8ca151b2013-01-24 14:25:36 +01002075 return ret;
2076}
2077
Sara Sharon26d6c162017-01-03 12:00:19 +02002078/*
2079 * Allocate a new station entry for the multicast station to the given vif,
2080 * and send it to the FW.
2081 * Note that each AP/GO mac should have its own multicast station.
2082 *
2083 * @mvm: the mvm component
2084 * @vif: the interface to which the multicast station is added
2085 */
2086int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2087{
2088 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2089 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2090 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2091 const u8 *maddr = _maddr;
2092 struct iwl_trans_txq_scd_cfg cfg = {
2093 .fifo = IWL_MVM_TX_FIFO_MCAST,
2094 .sta_id = msta->sta_id,
2095 .tid = IWL_MAX_TID_COUNT,
2096 .aggregate = false,
2097 .frame_limit = IWL_FRAME_LIMIT,
2098 };
2099 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2100 int ret;
2101
2102 lockdep_assert_held(&mvm->mutex);
2103
2104 if (!iwl_mvm_is_dqa_supported(mvm))
2105 return 0;
2106
2107 if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
2108 return -ENOTSUPP;
2109
2110 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2111 mvmvif->id, mvmvif->color);
2112 if (ret) {
2113 iwl_mvm_dealloc_int_sta(mvm, msta);
2114 return ret;
2115 }
2116
2117 /*
2118 * Enable cab queue after the ADD_STA command is sent.
2119 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
2120 * command with unknown station id.
2121 */
Sara Sharon310181e2017-01-17 14:27:48 +02002122 if (iwl_mvm_has_new_tx_api(mvm)) {
2123 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2124 msta->sta_id,
2125 IWL_MAX_TID_COUNT,
2126 timeout);
2127 vif->cab_queue = queue;
2128 } else {
2129 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2130 &cfg, timeout);
2131 }
Sara Sharon26d6c162017-01-03 12:00:19 +02002132
2133 return 0;
2134}
2135
2136/*
2137 * Send the FW a request to remove the station from it's internal data
2138 * structures, and in addition remove it from the local data structure.
2139 */
2140int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2141{
2142 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2143 int ret;
2144
2145 lockdep_assert_held(&mvm->mutex);
2146
2147 if (!iwl_mvm_is_dqa_supported(mvm))
2148 return 0;
2149
2150 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
2151 IWL_MAX_TID_COUNT, 0);
2152
2153 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2154 if (ret)
2155 IWL_WARN(mvm, "Failed sending remove station\n");
2156
2157 return ret;
2158}
2159
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002160#define IWL_MAX_RX_BA_SESSIONS 16
2161
Sara Sharonb915c102016-03-23 16:32:02 +02002162static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
Sara Sharon10b2b202016-03-20 16:23:41 +02002163{
Sara Sharonb915c102016-03-23 16:32:02 +02002164 struct iwl_mvm_delba_notif notif = {
2165 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2166 .metadata.sync = 1,
2167 .delba.baid = baid,
Sara Sharon10b2b202016-03-20 16:23:41 +02002168 };
Sara Sharonb915c102016-03-23 16:32:02 +02002169 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2170};
Sara Sharon10b2b202016-03-20 16:23:41 +02002171
Sara Sharonb915c102016-03-23 16:32:02 +02002172static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2173 struct iwl_mvm_baid_data *data)
2174{
2175 int i;
2176
2177 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2178
2179 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2180 int j;
2181 struct iwl_mvm_reorder_buffer *reorder_buf =
2182 &data->reorder_buf[i];
2183
Sara Sharon06904052016-02-28 20:28:17 +02002184 spin_lock_bh(&reorder_buf->lock);
2185 if (likely(!reorder_buf->num_stored)) {
2186 spin_unlock_bh(&reorder_buf->lock);
Sara Sharonb915c102016-03-23 16:32:02 +02002187 continue;
Sara Sharon06904052016-02-28 20:28:17 +02002188 }
Sara Sharonb915c102016-03-23 16:32:02 +02002189
2190 /*
2191 * This shouldn't happen in regular DELBA since the internal
2192 * delBA notification should trigger a release of all frames in
2193 * the reorder buffer.
2194 */
2195 WARN_ON(1);
2196
2197 for (j = 0; j < reorder_buf->buf_size; j++)
2198 __skb_queue_purge(&reorder_buf->entries[j]);
Sara Sharon06904052016-02-28 20:28:17 +02002199 /*
2200 * Prevent timer re-arm. This prevents a very far fetched case
2201 * where we timed out on the notification. There may be prior
2202 * RX frames pending in the RX queue before the notification
2203 * that might get processed between now and the actual deletion
2204 * and we would re-arm the timer although we are deleting the
2205 * reorder buffer.
2206 */
2207 reorder_buf->removed = true;
2208 spin_unlock_bh(&reorder_buf->lock);
2209 del_timer_sync(&reorder_buf->reorder_timer);
Sara Sharonb915c102016-03-23 16:32:02 +02002210 }
2211}
2212
/*
 * Initialize the per-RX-queue reorder buffers for a new BA session.
 *
 * @sta_id: station the BA session belongs to
 * @data: BA session data whose reorder_buf array is set up
 * @ssn: starting sequence number of the session
 * @buf_size: reorder window size (number of frame slots per buffer)
 */
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		reorder_buf->valid = false;
		/* one skb queue per slot of the reorder window */
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}
2242
Johannes Berg8ca151b2013-01-24 14:25:36 +01002243int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
Sara Sharon10b2b202016-03-20 16:23:41 +02002244 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002245{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002246 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002247 struct iwl_mvm_add_sta_cmd cmd = {};
Sara Sharon10b2b202016-03-20 16:23:41 +02002248 struct iwl_mvm_baid_data *baid_data = NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002249 int ret;
2250 u32 status;
2251
2252 lockdep_assert_held(&mvm->mutex);
2253
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002254 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2255 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2256 return -ENOSPC;
2257 }
2258
Sara Sharon10b2b202016-03-20 16:23:41 +02002259 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2260 /*
2261 * Allocate here so if allocation fails we can bail out early
2262 * before starting the BA session in the firmware
2263 */
Sara Sharonb915c102016-03-23 16:32:02 +02002264 baid_data = kzalloc(sizeof(*baid_data) +
2265 mvm->trans->num_rx_queues *
2266 sizeof(baid_data->reorder_buf[0]),
2267 GFP_KERNEL);
Sara Sharon10b2b202016-03-20 16:23:41 +02002268 if (!baid_data)
2269 return -ENOMEM;
2270 }
2271
Johannes Berg8ca151b2013-01-24 14:25:36 +01002272 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2273 cmd.sta_id = mvm_sta->sta_id;
2274 cmd.add_modify = STA_MODE_MODIFY;
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002275 if (start) {
2276 cmd.add_immediate_ba_tid = (u8) tid;
2277 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
Sara Sharon854c5702016-01-26 13:17:47 +02002278 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
Emmanuel Grumbach93a42662013-07-02 13:35:35 +03002279 } else {
2280 cmd.remove_immediate_ba_tid = (u8) tid;
2281 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002282 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2283 STA_MODIFY_REMOVE_BA_TID;
2284
2285 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002286 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2287 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002288 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002289 if (ret)
Sara Sharon10b2b202016-03-20 16:23:41 +02002290 goto out_free;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002291
Sara Sharon837c4da2016-01-07 16:50:45 +02002292 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002293 case ADD_STA_SUCCESS:
Sara Sharon35263a02016-06-21 12:12:10 +03002294 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2295 start ? "start" : "stopp");
Johannes Berg8ca151b2013-01-24 14:25:36 +01002296 break;
2297 case ADD_STA_IMMEDIATE_BA_FAILURE:
2298 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2299 ret = -ENOSPC;
2300 break;
2301 default:
2302 ret = -EIO;
2303 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2304 start ? "start" : "stopp", status);
2305 break;
2306 }
2307
Sara Sharon10b2b202016-03-20 16:23:41 +02002308 if (ret)
2309 goto out_free;
Emmanuel Grumbach113a0442013-07-02 14:16:38 +03002310
Sara Sharon10b2b202016-03-20 16:23:41 +02002311 if (start) {
2312 u8 baid;
2313
2314 mvm->rx_ba_sessions++;
2315
2316 if (!iwl_mvm_has_new_rx_api(mvm))
2317 return 0;
2318
2319 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2320 ret = -EINVAL;
2321 goto out_free;
2322 }
2323 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2324 IWL_ADD_STA_BAID_SHIFT);
2325 baid_data->baid = baid;
2326 baid_data->timeout = timeout;
2327 baid_data->last_rx = jiffies;
Wei Yongjun72c240f2016-07-12 11:40:57 +00002328 setup_timer(&baid_data->session_timer,
2329 iwl_mvm_rx_agg_session_expired,
2330 (unsigned long)&mvm->baid_map[baid]);
Sara Sharon10b2b202016-03-20 16:23:41 +02002331 baid_data->mvm = mvm;
2332 baid_data->tid = tid;
2333 baid_data->sta_id = mvm_sta->sta_id;
2334
2335 mvm_sta->tid_to_baid[tid] = baid;
2336 if (timeout)
2337 mod_timer(&baid_data->session_timer,
2338 TU_TO_EXP_TIME(timeout * 2));
2339
Sara Sharonb915c102016-03-23 16:32:02 +02002340 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2341 baid_data, ssn, buf_size);
Sara Sharon10b2b202016-03-20 16:23:41 +02002342 /*
2343 * protect the BA data with RCU to cover a case where our
2344 * internal RX sync mechanism will timeout (not that it's
2345 * supposed to happen) and we will free the session data while
2346 * RX is being processed in parallel
2347 */
Sara Sharon35263a02016-06-21 12:12:10 +03002348 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2349 mvm_sta->sta_id, tid, baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002350 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2351 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
Sara Sharon60dec522016-06-21 14:14:08 +03002352 } else {
Sara Sharon10b2b202016-03-20 16:23:41 +02002353 u8 baid = mvm_sta->tid_to_baid[tid];
2354
Sara Sharon60dec522016-06-21 14:14:08 +03002355 if (mvm->rx_ba_sessions > 0)
2356 /* check that restart flow didn't zero the counter */
2357 mvm->rx_ba_sessions--;
Sara Sharon10b2b202016-03-20 16:23:41 +02002358 if (!iwl_mvm_has_new_rx_api(mvm))
2359 return 0;
2360
2361 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2362 return -EINVAL;
2363
2364 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2365 if (WARN_ON(!baid_data))
2366 return -EINVAL;
2367
2368 /* synchronize all rx queues so we can safely delete */
Sara Sharonb915c102016-03-23 16:32:02 +02002369 iwl_mvm_free_reorder(mvm, baid_data);
Sara Sharon10b2b202016-03-20 16:23:41 +02002370 del_timer_sync(&baid_data->session_timer);
Sara Sharon10b2b202016-03-20 16:23:41 +02002371 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2372 kfree_rcu(baid_data, rcu_head);
Sara Sharon35263a02016-06-21 12:12:10 +03002373 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
Sara Sharon10b2b202016-03-20 16:23:41 +02002374 }
2375 return 0;
2376
2377out_free:
2378 kfree(baid_data);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002379 return ret;
2380}
2381
Liad Kaufman9794c642015-08-19 17:34:28 +03002382int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2383 int tid, u8 queue, bool start)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002384{
Johannes Berg9d8ce6a2014-12-23 16:02:40 +01002385 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002386 struct iwl_mvm_add_sta_cmd cmd = {};
Johannes Berg8ca151b2013-01-24 14:25:36 +01002387 int ret;
2388 u32 status;
2389
2390 lockdep_assert_held(&mvm->mutex);
2391
2392 if (start) {
2393 mvm_sta->tfd_queue_msk |= BIT(queue);
2394 mvm_sta->tid_disable_agg &= ~BIT(tid);
2395 } else {
Liad Kaufmancf961e12015-08-13 19:16:08 +03002396 /* In DQA-mode the queue isn't removed on agg termination */
2397 if (!iwl_mvm_is_dqa_supported(mvm))
2398 mvm_sta->tfd_queue_msk &= ~BIT(queue);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002399 mvm_sta->tid_disable_agg |= BIT(tid);
2400 }
2401
2402 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2403 cmd.sta_id = mvm_sta->sta_id;
2404 cmd.add_modify = STA_MODE_MODIFY;
Sara Sharonbb497012016-09-29 14:52:40 +03002405 if (!iwl_mvm_has_new_tx_api(mvm))
2406 cmd.modify_mask = STA_MODIFY_QUEUES;
2407 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002408 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2409 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2410
2411 status = ADD_STA_SUCCESS;
Sara Sharon854c5702016-01-26 13:17:47 +02002412 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2413 iwl_mvm_add_sta_cmd_size(mvm),
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03002414 &cmd, &status);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002415 if (ret)
2416 return ret;
2417
Sara Sharon837c4da2016-01-07 16:50:45 +02002418 switch (status & IWL_ADD_STA_STATUS_MASK) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01002419 case ADD_STA_SUCCESS:
2420 break;
2421 default:
2422 ret = -EIO;
2423 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2424 start ? "start" : "stopp", status);
2425 break;
2426 }
2427
2428 return ret;
2429}
2430
/*
 * TID to mac80211 access-category mapping (802.11 UP-to-AC mapping).
 * Entry 8 is the driver-internal MGMT pseudo-TID, mapped to AC_VO.
 */
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};
2442
/*
 * Same TID-to-AC mapping as tid_to_mac80211_ac, but expressed in the
 * firmware's AC_* values (no MGMT pseudo-TID entry here).
 */
static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
2453
/*
 * Start a TX aggregation (BlockAck) session for @sta on @tid.
 *
 * Chooses the TX queue the session will run on (reusing the TID's
 * existing DQA queue when it is READY, otherwise reserving a free agg
 * queue) and reports the starting SSN through @ssn for the ADDBA
 * request.  If all frames up to the SSN have already been reclaimed the
 * state moves to IWL_AGG_STARTING and mac80211's start callback is
 * scheduled; otherwise the state is IWL_EMPTYING_HW_QUEUE_ADDBA and the
 * callback is deferred until the queue drains.
 *
 * Returns 0 on success; -EINVAL for an out-of-range TID, -ENXIO if the
 * TID is not in IWL_AGG_OFF or the queue is shared, -EIO if D0i3 was
 * entered concurrently, or a negative queue-allocation error.
 */
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	/* nested inside mvmsta->lock (both BH-disabled) */
	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *	one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
	 *	non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (iwl_mvm_is_dqa_supported(mvm) &&
	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}
		/*
		 * TXQ shouldn't be in inactive mode for non-DQA, so getting
		 * an inactive queue from iwl_mvm_find_free_queue() is
		 * certainly a bug
		 */
		WARN_ON(mvm->queue_info[txq_id].status ==
			IWL_MVM_QUEUE_INACTIVE);

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		/* defer the callback until the HW queue is emptied */
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
2555
/*
 * Make a TX aggregation session operational after the peer accepted
 * the ADDBA: configure (or, in DQA mode, possibly reconfigure) the TX
 * queue for aggregation with the negotiated window size @buf_size,
 * enable aggregation in the firmware's station entry, and cap the
 * rate-scaling aggregation frame limit accordingly.
 *
 * @amsdu: whether A-MSDU inside A-MPDU is permitted for this session.
 *
 * Returns 0 on success or a negative error (-ENOTSUPP when a queue
 * reconfiguration is needed on the new TX API, -EIO on ADD_STA
 * failure, or a drain/reconfig error).
 */
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/* agg_tids must hold one bit per TID */
	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * On new TX API rs and BA manager are offloaded.
			 * For now though, just don't support being reconfigured
			 */
			if (iwl_mvm_has_new_tx_api(mvm))
				return -ENOTSUPP;

			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	/* push the updated frame-count limit to the rate-scaling code */
	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
2670
/*
 * Gracefully stop the TX aggregation session on @sta/@tid.
 *
 * Handles the TID state machine:
 *  - IWL_AGG_ON with frames still in flight: move to
 *    IWL_EMPTYING_HW_QUEUE_DELBA and let the reclaim path finish;
 *  - IWL_AGG_ON fully drained: tear down immediately (notify mac80211,
 *    disable aggregation in the firmware, free the non-DQA queue);
 *  - IWL_AGG_STARTING / IWL_EMPTYING_HW_QUEUE_ADDBA: session never
 *    became operational (e.g. AddBA timeout), just reset to OFF;
 *  - anything else is an error (-EINVAL).
 *
 * Returns 0 on success/deferred teardown, -EINVAL on a bad state.
 */
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;

	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		/* drop the lock before the sleeping firmware call below */
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		/* lock already released on this path */
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
2766
/*
 * Forcefully tear down the TX aggregation session on @sta/@tid,
 * flushing any frames still queued in the hardware instead of waiting
 * for them to be reclaimed (unlike iwl_mvm_sta_tx_agg_stop()).
 *
 * Always returns 0; flush/drain failures are only logged.
 */
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* only sessions that were (or were becoming) operational need this */
	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		/* best effort - wait result deliberately ignored here */
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}
2820
Johannes Berg8ca151b2013-01-24 14:25:36 +01002821static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2822{
Johannes Berg2dc2a152015-06-16 17:09:18 +02002823 int i, max = -1, max_offs = -1;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002824
2825 lockdep_assert_held(&mvm->mutex);
2826
Johannes Berg2dc2a152015-06-16 17:09:18 +02002827 /* Pick the unused key offset with the highest 'deleted'
2828 * counter. Every time a key is deleted, all the counters
2829 * are incremented and the one that was just deleted is
2830 * reset to zero. Thus, the highest counter is the one
2831 * that was deleted longest ago. Pick that one.
2832 */
2833 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2834 if (test_bit(i, mvm->fw_key_table))
2835 continue;
2836 if (mvm->fw_key_deleted[i] > max) {
2837 max = mvm->fw_key_deleted[i];
2838 max_offs = i;
2839 }
2840 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002841
Johannes Berg2dc2a152015-06-16 17:09:18 +02002842 if (max_offs < 0)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002843 return STA_KEY_IDX_INVALID;
2844
Johannes Berg2dc2a152015-06-16 17:09:18 +02002845 return max_offs;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002846}
2847
Johannes Berg5f7a1842015-12-11 09:36:10 +01002848static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2849 struct ieee80211_vif *vif,
2850 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01002851{
Johannes Berg5b530e92014-12-23 16:00:17 +01002852 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002853
Johannes Berg5f7a1842015-12-11 09:36:10 +01002854 if (sta)
2855 return iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01002856
2857 /*
2858 * The device expects GTKs for station interfaces to be
2859 * installed as GTKs for the AP station. If we have no
2860 * station ID, then use AP's station ID.
2861 */
2862 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02002863 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Avri Altman9513c5e2015-10-19 16:29:11 +02002864 u8 sta_id = mvmvif->ap_sta_id;
2865
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002866 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2867 lockdep_is_held(&mvm->mutex));
2868
Avri Altman9513c5e2015-10-19 16:29:11 +02002869 /*
2870 * It is possible that the 'sta' parameter is NULL,
2871 * for example when a GTK is removed - the sta_id will then
2872 * be the AP ID, and no station was passed by mac80211.
2873 */
Emmanuel Grumbach7d6a1ab2016-05-15 10:20:29 +03002874 if (IS_ERR_OR_NULL(sta))
2875 return NULL;
2876
2877 return iwl_mvm_sta_from_mac80211(sta);
Avri Altman9513c5e2015-10-19 16:29:11 +02002878 }
Johannes Berg8ca151b2013-01-24 14:25:36 +01002879
Johannes Berg5f7a1842015-12-11 09:36:10 +01002880 return NULL;
Johannes Berg8ca151b2013-01-24 14:25:36 +01002881}
2882
/*
 * Program a station key into the firmware via ADD_STA_KEY.
 *
 * Two command layouts exist, selected by the
 * IWL_UCODE_TLV_API_TKIP_MIC_KEYS capability:
 *  - old (cmd_v1): the driver supplies the pre-computed TKIP phase-1
 *    key material (@tkip_iv32, @tkip_p1k);
 *  - new (cmd): the firmware derives TKIP material itself and instead
 *    needs the TX MIC keys and the current TX PN.
 *
 * @mcast: install as a multicast (group) key.
 * @key_offset: slot in the firmware key table (see
 *              iwl_mvm_set_fw_key_idx()).
 *
 * Returns 0 on success, the command-send error, or -EIO if the
 * firmware reported failure (sync sends only - async sends leave
 * status at its ADD_STA_SUCCESS default).
 */
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		/* WEP key material is placed at offset 3 in the buffer */
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/* the 'common' part shares a layout in both command versions */
	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = mvm_sta->sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
2989
/*
 * Install or remove the IGTK (management-frame protection key) for
 * station @sta_id via the MGMT_MCAST_KEY command.
 *
 * Supports AES-CMAC and, on firmware with the new RX API, BIP-GMAC-128
 * and BIP-GMAC-256.  For removal only the key id/sta id are sent with
 * STA_KEY_NOT_VALID; for installation the key material and the current
 * receive PN are included.  Older firmware gets the smaller v1 command
 * layout.
 *
 * Returns 0 on success, -EINVAL on unsupported key parameters, or the
 * command-send error.
 */
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	/* GMAC ciphers require the new RX API */
	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		/* pack the big-endian 6-byte PN into a little-endian u64 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		/* old firmware: repackage into the v1 command layout */
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
3063
3064
3065static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3066 struct ieee80211_vif *vif,
3067 struct ieee80211_sta *sta)
3068{
Johannes Berg5b530e92014-12-23 16:00:17 +01003069 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003070
3071 if (sta)
3072 return sta->addr;
3073
3074 if (vif->type == NL80211_IFTYPE_STATION &&
Sara Sharon0ae98812017-01-04 14:53:58 +02003075 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003076 u8 sta_id = mvmvif->ap_sta_id;
3077 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3078 lockdep_is_held(&mvm->mutex));
3079 return sta->addr;
3080 }
3081
3082
3083 return NULL;
3084}
3085
Johannes Berg2f6319d2014-11-12 23:39:56 +01003086static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3087 struct ieee80211_vif *vif,
3088 struct ieee80211_sta *sta,
Johannes Bergba3943b2014-11-12 23:54:48 +01003089 struct ieee80211_key_conf *keyconf,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003090 u8 key_offset,
Johannes Bergba3943b2014-11-12 23:54:48 +01003091 bool mcast)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003092{
Johannes Berg2f6319d2014-11-12 23:39:56 +01003093 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003094 int ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003095 const u8 *addr;
Johannes Berg8ca151b2013-01-24 14:25:36 +01003096 struct ieee80211_key_seq seq;
3097 u16 p1k[5];
3098
Johannes Berg8ca151b2013-01-24 14:25:36 +01003099 switch (keyconf->cipher) {
3100 case WLAN_CIPHER_SUITE_TKIP:
3101 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3102 /* get phase 1 key from mac80211 */
3103 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3104 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
Johannes Bergba3943b2014-11-12 23:54:48 +01003105 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003106 seq.tkip.iv32, p1k, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003107 break;
3108 case WLAN_CIPHER_SUITE_CCMP:
Johannes Bergba3943b2014-11-12 23:54:48 +01003109 case WLAN_CIPHER_SUITE_WEP40:
3110 case WLAN_CIPHER_SUITE_WEP104:
Ayala Beker2a53d162016-04-07 16:21:57 +03003111 case WLAN_CIPHER_SUITE_GCMP:
3112 case WLAN_CIPHER_SUITE_GCMP_256:
Johannes Bergba3943b2014-11-12 23:54:48 +01003113 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003114 0, NULL, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003115 break;
3116 default:
Johannes Bergba3943b2014-11-12 23:54:48 +01003117 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003118 0, NULL, 0, key_offset);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003119 }
3120
Johannes Berg8ca151b2013-01-24 14:25:36 +01003121 return ret;
3122}
3123
/*
 * Remove one key from the firmware's key table for the given station.
 *
 * Sends an ADD_STA_KEY command with STA_KEY_NOT_VALID set so the firmware
 * invalidates the key at keyconf->hw_key_idx.  @mcast selects the group-key
 * slot.  Returns 0 on success, -EIO if the firmware rejected the command,
 * or the error from sending the command.
 */
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	/* old (v1) and new command layouts share a common prefix, so a
	 * union lets us fill the common fields once and send either size */
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	/* STA_KEY_NOT_VALID tells the firmware to drop this key */
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	/* newer firmware expects the larger command layout */
	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	/* command went out; translate the firmware's status into an errno */
	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
3172
/*
 * iwl_mvm_set_sta_key - install a key into the firmware key table
 *
 * Resolves the station (either from @sta or the local station table),
 * routes management-group ciphers (IGTK) to MGMT_MCAST_KEY, and uploads
 * all other keys with ADD_STA_KEY.  WEP keys are uploaded twice (unicast
 * and multicast) sharing the same key offset.
 *
 * @key_offset: firmware key-table index to use, or STA_KEY_IDX_INVALID
 *	to allocate a fresh one (the normal case; pre-assigned offsets are
 *	used on HW restart and D3 entry — see comment below).
 *
 * Must be called with mvm->mutex held.  Returns 0 on success or a
 * negative error code.
 */
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	/* management group keys (IGTK) go through a dedicated command */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	/* the key's owner must match the vif it's being installed on */
	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	/* only mark the offset used once all uploads succeeded */
	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
3264
3265int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3266 struct ieee80211_vif *vif,
3267 struct ieee80211_sta *sta,
3268 struct ieee80211_key_conf *keyconf)
3269{
Johannes Bergba3943b2014-11-12 23:54:48 +01003270 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg5f7a1842015-12-11 09:36:10 +01003271 struct iwl_mvm_sta *mvm_sta;
Sara Sharon0ae98812017-01-04 14:53:58 +02003272 u8 sta_id = IWL_MVM_INVALID_STA;
Johannes Berg2dc2a152015-06-16 17:09:18 +02003273 int ret, i;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003274
3275 lockdep_assert_held(&mvm->mutex);
3276
Johannes Berg5f7a1842015-12-11 09:36:10 +01003277 /* Get the station from the mvm local station table */
3278 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
Ilan Peercd4d23c2017-01-16 15:07:03 +02003279 if (!mvm_sta) {
3280 IWL_ERR(mvm, "Failed to find station\n");
3281 return -EINVAL;
3282 }
3283 sta_id = mvm_sta->sta_id;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003284
3285 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3286 keyconf->keyidx, sta_id);
3287
Ayala Beker8e160ab2016-04-11 11:37:38 +03003288 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3289 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3290 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
Johannes Berg2f6319d2014-11-12 23:39:56 +01003291 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3292
3293 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3294 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3295 keyconf->hw_key_idx);
3296 return -ENOENT;
3297 }
3298
Johannes Berg2dc2a152015-06-16 17:09:18 +02003299 /* track which key was deleted last */
3300 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3301 if (mvm->fw_key_deleted[i] < U8_MAX)
3302 mvm->fw_key_deleted[i]++;
3303 }
3304 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3305
Johannes Berg5f7a1842015-12-11 09:36:10 +01003306 if (!mvm_sta) {
Johannes Berg2f6319d2014-11-12 23:39:56 +01003307 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3308 return 0;
3309 }
3310
Johannes Bergba3943b2014-11-12 23:54:48 +01003311 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3312 if (ret)
3313 return ret;
3314
3315 /* delete WEP key twice to get rid of (now useless) offset */
3316 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3317 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3318 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3319
3320 return ret;
Johannes Berg2f6319d2014-11-12 23:39:56 +01003321}
3322
Johannes Berg8ca151b2013-01-24 14:25:36 +01003323void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3324 struct ieee80211_vif *vif,
3325 struct ieee80211_key_conf *keyconf,
3326 struct ieee80211_sta *sta, u32 iv32,
3327 u16 *phase1key)
3328{
Beni Levc3eb5362013-02-06 17:22:18 +02003329 struct iwl_mvm_sta *mvm_sta;
Johannes Bergba3943b2014-11-12 23:54:48 +01003330 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003331
Beni Levc3eb5362013-02-06 17:22:18 +02003332 rcu_read_lock();
3333
Johannes Berg5f7a1842015-12-11 09:36:10 +01003334 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3335 if (WARN_ON_ONCE(!mvm_sta))
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003336 goto unlock;
Johannes Bergba3943b2014-11-12 23:54:48 +01003337 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
Luca Coelhod6ee54a2015-11-10 22:13:43 +02003338 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
Emmanuel Grumbach12f17212015-12-20 14:48:08 +02003339
3340 unlock:
Beni Levc3eb5362013-02-06 17:22:18 +02003341 rcu_read_unlock();
Johannes Berg8ca151b2013-01-24 14:25:36 +01003342}
3343
Johannes Berg9cc40712013-02-15 22:47:48 +01003344void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3345 struct ieee80211_sta *sta)
Johannes Berg8ca151b2013-01-24 14:25:36 +01003346{
Johannes Berg5b577a92013-11-14 18:20:04 +01003347 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
Emmanuel Grumbachf9dc0002014-03-30 09:53:27 +03003348 struct iwl_mvm_add_sta_cmd cmd = {
Johannes Berg8ca151b2013-01-24 14:25:36 +01003349 .add_modify = STA_MODE_MODIFY,
Johannes Berg9cc40712013-02-15 22:47:48 +01003350 .sta_id = mvmsta->sta_id,
Emmanuel Grumbach5af01772013-06-09 12:59:24 +03003351 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
Johannes Berg9cc40712013-02-15 22:47:48 +01003352 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
Johannes Berg8ca151b2013-01-24 14:25:36 +01003353 };
3354 int ret;
3355
Sara Sharon854c5702016-01-26 13:17:47 +02003356 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3357 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Johannes Berg8ca151b2013-01-24 14:25:36 +01003358 if (ret)
3359 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3360}
3361
/*
 * iwl_mvm_sta_modify_sleep_tx_count - release frames to a sleeping station
 *
 * Tell the firmware how many frames it may transmit to a station in power
 * save during a service period (uAPSD/PS-Poll).  When all released TIDs
 * map to a single station queue, the count is clamped to what is actually
 * queued and more_data is set if more frames remain.
 *
 * @cnt: number of frames mac80211 wants released
 * @tids: bitmap of TIDs the frames may come from
 * @more_data: mac80211's more-data indication (may be forced on here)
 * @single_sta_queue: true if all frames come from a single station queue
 */
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		/* mvmsta->lock protects the per-TID queue state we read */
		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
				 tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				/* tell mac80211 the service period ended */
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		/* clamp to the number of frames actually queued */
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			/* nothing to release: end the service period now */
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
Johannes Berg3e56ead2013-02-15 22:23:18 +01003453
Johannes Berg04168412015-06-23 21:22:09 +02003454void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3455 struct iwl_rx_cmd_buffer *rxb)
Johannes Berg3e56ead2013-02-15 22:23:18 +01003456{
3457 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3458 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3459 struct ieee80211_sta *sta;
3460 u32 sta_id = le32_to_cpu(notif->sta_id);
3461
3462 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
Johannes Berg04168412015-06-23 21:22:09 +02003463 return;
Johannes Berg3e56ead2013-02-15 22:23:18 +01003464
3465 rcu_read_lock();
3466 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3467 if (!IS_ERR_OR_NULL(sta))
3468 ieee80211_sta_eosp(sta);
3469 rcu_read_unlock();
Johannes Berg3e56ead2013-02-15 22:23:18 +01003470}
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003471
3472void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3473 struct iwl_mvm_sta *mvmsta, bool disable)
3474{
3475 struct iwl_mvm_add_sta_cmd cmd = {
3476 .add_modify = STA_MODE_MODIFY,
3477 .sta_id = mvmsta->sta_id,
3478 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3479 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3480 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3481 };
3482 int ret;
3483
Sara Sharon854c5702016-01-26 13:17:47 +02003484 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3485 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
Andrei Otcheretianski09b0ce12014-05-25 17:07:38 +03003486 if (ret)
3487 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3488}
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003489
3490void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3491 struct ieee80211_sta *sta,
3492 bool disable)
3493{
3494 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3495
3496 spin_lock_bh(&mvm_sta->lock);
3497
3498 if (mvm_sta->disable_tx == disable) {
3499 spin_unlock_bh(&mvm_sta->lock);
3500 return;
3501 }
3502
3503 mvm_sta->disable_tx = disable;
3504
3505 /*
Sara Sharon0d365ae2015-03-31 12:24:05 +03003506 * Tell mac80211 to start/stop queuing tx for this station,
3507 * but don't stop queuing if there are still pending frames
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003508 * for this station.
3509 */
3510 if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
3511 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3512
3513 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3514
3515 spin_unlock_bh(&mvm_sta->lock);
3516}
3517
3518void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3519 struct iwl_mvm_vif *mvmvif,
3520 bool disable)
3521{
3522 struct ieee80211_sta *sta;
3523 struct iwl_mvm_sta *mvm_sta;
3524 int i;
3525
3526 lockdep_assert_held(&mvm->mutex);
3527
3528 /* Block/unblock all the stations of the given mvmvif */
Sara Sharon0ae98812017-01-04 14:53:58 +02003529 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
Andrei Otcheretianski003e52362014-05-25 17:24:22 +03003530 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3531 lockdep_is_held(&mvm->mutex));
3532 if (IS_ERR_OR_NULL(sta))
3533 continue;
3534
3535 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3536 if (mvm_sta->mac_id_n_color !=
3537 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3538 continue;
3539
3540 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3541 }
3542}
Luciano Coelhodc88b4b2014-11-10 11:10:14 +02003543
3544void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3545{
3546 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3547 struct iwl_mvm_sta *mvmsta;
3548
3549 rcu_read_lock();
3550
3551 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3552
3553 if (!WARN_ON(!mvmsta))
3554 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3555
3556 rcu_read_unlock();
3557}